// proxmox-offline-mirror: src/mirror.rs
use std::{
    cmp::max,
    collections::HashMap,
    io::Read,
    path::{Path, PathBuf},
};

use anyhow::{bail, format_err, Error};
use flate2::bufread::GzDecoder;
use nix::libc;
use proxmox_http::{client::sync::Client, HttpClient, HttpOptions};
use proxmox_sys::fs::file_get_contents;

use crate::{
    config::{MirrorConfig, SubscriptionKey},
    convert_repo_line,
    pool::Pool,
    types::{Diff, Snapshot, SNAPSHOT_REGEX},
    FetchResult, Progress,
};
use proxmox_apt::{
    deb822::{
        CheckSums, CompressionType, FileReference, FileReferenceType, PackagesFile, ReleaseFile,
    },
    repositories::{APTRepository, APTRepositoryPackageType},
};

use crate::helpers;

fn mirror_dir(config: &MirrorConfig) -> String {
    format!("{}/{}", config.base_dir, config.id)
}

pub(crate) fn pool(config: &MirrorConfig) -> Result<Pool, Error> {
    let pool_dir = format!("{}/.pool", config.base_dir);
    Pool::open(Path::new(&mirror_dir(config)), Path::new(&pool_dir))
}

/// `MirrorConfig`, but some fields converted/parsed into usable types.
struct ParsedMirrorConfig {
    pub repository: APTRepository,
    pub architectures: Vec<String>,
    pub pool: Pool,
    pub key: Vec<u8>,
    pub verify: bool,
    pub sync: bool,
    pub auth: Option<String>,
    pub client: Client,
    pub ignore_errors: bool,
}

impl TryInto<ParsedMirrorConfig> for MirrorConfig {
    type Error = anyhow::Error;

    fn try_into(self) -> Result<ParsedMirrorConfig, Self::Error> {
        let pool = pool(&self)?;

        let repository = convert_repo_line(self.repository.clone())?;

        let key = file_get_contents(Path::new(&self.key_path))?;

        let options = HttpOptions {
            user_agent: Some("proxmox-offline-mirror 0.1".to_string()),
            ..Default::default()
        }; // TODO actually read version ;)

        let client = Client::new(options);

        Ok(ParsedMirrorConfig {
            repository,
            architectures: self.architectures,
            pool,
            key,
            verify: self.verify,
            sync: self.sync,
            auth: None,
            client,
            ignore_errors: self.ignore_errors,
        })
    }
}

// Helper to get absolute URL for dist-specific relative `path`.
fn get_dist_url(repo: &APTRepository, path: &str) -> String {
    let dist_root = format!("{}/dists/{}", repo.uris[0], repo.suites[0]);

    format!("{}/{}", dist_root, path)
}

// Helper to get dist-specific path given a `prefix` (snapshot dir) and relative `path`.
fn get_dist_path(repo: &APTRepository, prefix: &Path, path: &str) -> PathBuf {
    let mut base = PathBuf::from(prefix);
    base.push("dists");
    base.push(&repo.suites[0]);
    base.push(path);
    base
}

// Helper to get generic URL given a `repo` and `path`.
fn get_repo_url(repo: &APTRepository, path: &str) -> String {
    format!("{}/{}", repo.uris[0], path)
}
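
// Illustrative example (values are hypothetical, not taken from any real config): for a
// repository line with `uris[0] == "http://download.proxmox.com/debian/pve"` and
// `suites[0] == "bullseye"`, the helpers above resolve as follows:
//   get_dist_url(&repo, "InRelease")
//     => "http://download.proxmox.com/debian/pve/dists/bullseye/InRelease"
//   get_dist_path(&repo, Path::new("<snapshot>.tmp"), "InRelease")
//     => "<snapshot>.tmp/dists/bullseye/InRelease"
//   get_repo_url(&repo, "pool/main/f/foo/foo_1.0_amd64.deb")
//     => "http://download.proxmox.com/debian/pve/pool/main/f/foo/foo_1.0_amd64.deb"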

/// Helper to fetch file from URI and optionally verify the response's checksum.
///
/// Only fetches and returns data, doesn't store anything anywhere.
9ecde319 107fn fetch_repo_file(
49997188 108 client: &Client,
9ecde319 109 uri: &str,
d7e210ac 110 max_size: usize,
9ecde319 111 checksums: Option<&CheckSums>,
8b267808 112 auth: Option<&str>,
9ecde319
FG
113) -> Result<FetchResult, Error> {
114 println!("-> GET '{}'..", uri);
115
49997188
FG
116 let headers = if let Some(auth) = auth {
117 let mut map = HashMap::new();
118 map.insert("Authorization".to_string(), auth.to_string());
119 Some(map)
8b267808 120 } else {
49997188 121 None
8b267808
FG
122 };
123
49997188 124 let response = client.get(uri, headers.as_ref())?;
9ecde319 125
49997188 126 let reader: Box<dyn Read> = response.into_body();
d7e210ac 127 let mut reader = reader.take(max_size as u64);
9ecde319 128 let mut data = Vec::new();
49997188 129 reader.read_to_end(&mut data)?;
9ecde319
FG
130
131 if let Some(checksums) = checksums {
132 checksums.verify(&data)?;
133 }
134
135 Ok(FetchResult {
49997188 136 fetched: data.len(),
9ecde319 137 data,
9ecde319
FG
138 })
139}
140
2d13dcfc
FG
/// Helper to fetch InRelease (`detached` == false) or Release/Release.gpg (`detached` == true) files from the repository.
///
/// Verifies the contained/detached signature, stores all fetched files under `prefix`, and returns the verified raw release file data.
fn fetch_release(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    detached: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let (name, fetched, sig) = if detached {
        println!("Fetching Release/Release.gpg files");
        let sig = fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release.gpg"),
            1024 * 1024,
            None,
            config.auth.as_deref(),
        )?;
        let mut fetched = fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release"),
            256 * 1024 * 1024,
            None,
            config.auth.as_deref(),
        )?;
        fetched.fetched += sig.fetched;
        ("Release(.gpg)", fetched, Some(sig.data()))
    } else {
        println!("Fetching InRelease file");
        let fetched = fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "InRelease"),
            256 * 1024 * 1024,
            None,
            config.auth.as_deref(),
        )?;
        ("InRelease", fetched, None)
    };

    println!("Verifying '{name}' signature using provided repository key..");
    let content = fetched.data_ref();
    let verified = helpers::verify_signature(content, &config.key, sig.as_deref())?;
    println!("Success");

    let sha512 = Some(openssl::sha::sha512(content));
    let csums = CheckSums {
        sha512,
        ..Default::default()
    };

    if dry_run {
        return Ok(FetchResult {
            data: verified,
            fetched: fetched.fetched,
        });
    }

    let locked = &config.pool.lock()?;

    if !locked.contains(&csums) {
        locked.add_file(content, &csums, config.sync)?;
    }

    if detached {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release")),
        )?;
        let sig = sig.unwrap();
        let sha512 = Some(openssl::sha::sha512(&sig));
        let csums = CheckSums {
            sha512,
            ..Default::default()
        };
        if !locked.contains(&csums) {
            locked.add_file(&sig, &csums, config.sync)?;
        }
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release.gpg")),
        )?;
    } else {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "InRelease")),
        )?;
    }

    Ok(FetchResult {
        data: verified,
        fetched: fetched.fetched,
    })
}

/// Helper to fetch an index file referenced by a `ReleaseFile`.
///
/// Since these usually come in compressed and uncompressed form, with the latter often not actually existing in the source repository as a file, this fetches and, if necessary, decompresses to obtain a copy of the uncompressed data.
/// Will skip fetching if both references are already available with the expected checksum in the pool, in which case they will just be re-linked under the new path.
///
/// Returns the uncompressed data.
fn fetch_index_file(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    reference: &FileReference,
    uncompressed: Option<&FileReference>,
    by_hash: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let url = get_dist_url(&config.repository, &reference.path);
    let path = get_dist_path(&config.repository, prefix, &reference.path);

    if let Some(uncompressed) = uncompressed {
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);

        if config.pool.contains(&reference.checksums)
            && config.pool.contains(&uncompressed.checksums)
        {
            let data = config
                .pool
                .get_contents(&uncompressed.checksums, config.verify)?;

            if dry_run {
                return Ok(FetchResult { data, fetched: 0 });
            }
            // Ensure they're linked at current path
            config.pool.lock()?.link_file(&reference.checksums, &path)?;
            config
                .pool
                .lock()?
                .link_file(&uncompressed.checksums, &uncompressed_path)?;
            return Ok(FetchResult { data, fetched: 0 });
        }
    }

    let urls = if by_hash {
        let mut urls = Vec::new();
        if let Some((base_url, _file_name)) = url.rsplit_once('/') {
            if let Some(sha512) = reference.checksums.sha512 {
                urls.push(format!("{base_url}/by-hash/SHA512/{}", hex::encode(sha512)));
            }
            if let Some(sha256) = reference.checksums.sha256 {
                urls.push(format!("{base_url}/by-hash/SHA256/{}", hex::encode(sha256)));
            }
        }
        urls.push(url);
        urls
    } else {
        vec![url]
    };
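    // Illustrative example (hypothetical values): for a `url` ending in
    // ".../dists/bullseye/main/binary-amd64/Packages.gz", the candidate list built above
    // is tried in order by the fold below:
    //   ".../dists/bullseye/main/binary-amd64/by-hash/SHA512/<hex sha512>"  (if present)
    //   ".../dists/bullseye/main/binary-amd64/by-hash/SHA256/<hex sha256>"  (if present)
    //   ".../dists/bullseye/main/binary-amd64/Packages.gz"                  (plain path)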

    let res = urls
        .iter()
        .fold(None, |res, url| match res {
            Some(Ok(res)) => Some(Ok(res)),
            _ => Some(fetch_plain_file(
                config,
                url,
                &path,
                reference.size,
                &reference.checksums,
                true,
                dry_run,
            )),
        })
        .ok_or_else(|| format_err!("Failed to retrieve {}", reference.path))??;

    let mut buf = Vec::new();
    let raw = res.data_ref();

    let decompressed = match reference.file_type.compression() {
        None => raw,
        Some(CompressionType::Gzip) => {
            let mut gz = GzDecoder::new(raw);
            gz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Bzip2) => {
            let mut bz = bzip2::read::BzDecoder::new(raw);
            bz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Lzma) | Some(CompressionType::Xz) => {
            let mut xz = xz2::read::XzDecoder::new_multi_decoder(raw);
            xz.read_to_end(&mut buf)?;
            &buf[..]
        }
    };
    let res = FetchResult {
        data: decompressed.to_owned(),
        fetched: res.fetched,
    };

    if dry_run {
        return Ok(res);
    }

    let locked = &config.pool.lock()?;
    if let Some(uncompressed) = uncompressed {
        if !locked.contains(&uncompressed.checksums) {
            locked.add_file(decompressed, &uncompressed.checksums, config.sync)?;
        }

        // Ensure it's linked at current path
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);
        locked.link_file(&uncompressed.checksums, &uncompressed_path)?;
    }

    Ok(res)
}

/// Helper to fetch arbitrary files like binary packages.
///
/// Will skip fetching if a matching file already exists locally, in which case it will just be re-linked under the new path.
///
/// If `need_data` is false and the mirror config is set to skip verification, reading the file's contents is skipped as well whenever fetching is skipped.
fn fetch_plain_file(
    config: &ParsedMirrorConfig,
    url: &str,
    file: &Path,
    max_size: usize,
    checksums: &CheckSums,
    need_data: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let locked = &config.pool.lock()?;
    let res = if locked.contains(checksums) {
        if need_data || config.verify {
            locked
                .get_contents(checksums, config.verify)
                .map(|data| FetchResult { data, fetched: 0 })?
        } else {
            // performance optimization for .deb files if verify is false
            // we never need the file contents and they make up the bulk of a repo
            FetchResult {
                data: vec![],
                fetched: 0,
            }
        }
    } else if dry_run && !need_data {
        FetchResult {
            data: vec![],
            fetched: 0,
        }
    } else {
        let fetched = fetch_repo_file(
            &config.client,
            url,
            max_size,
            Some(checksums),
            config.auth.as_deref(),
        )?;
        locked.add_file(fetched.data_ref(), checksums, config.verify)?;
        fetched
    };

    if !dry_run {
        // Ensure it's linked at current path
        locked.link_file(checksums, file)?;
    }

    Ok(res)
}

/// Initialize a new mirror (by creating the corresponding pool).
pub fn init(config: &MirrorConfig) -> Result<(), Error> {
    let pool_dir = format!("{}/.pool", config.base_dir);

    let dir = format!("{}/{}", config.base_dir, config.id);

    Pool::create(Path::new(&dir), Path::new(&pool_dir))?;
    Ok(())
}
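
// Resulting on-disk layout (sketch derived from the format strings above; paths are
// hypothetical): with `base_dir = "/srv/mirror"` and `id = "pve"`, `init` creates
//   /srv/mirror/.pool   -- shared pool of checksum-addressed file contents
//   /srv/mirror/pve     -- the mirror's link directory, which later holds one
//                          sub-directory per snapshot (see `list_snapshots`)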

/// Destroy a mirror (by destroying the corresponding pool's link dir followed by GC).
pub fn destroy(config: &MirrorConfig) -> Result<(), Error> {
    let pool: Pool = pool(config)?;
    pool.lock()?.destroy()?;

    Ok(())
}

/// List snapshots
pub fn list_snapshots(config: &MirrorConfig) -> Result<Vec<Snapshot>, Error> {
    let _pool: Pool = pool(config)?;

    let mut list: Vec<Snapshot> = vec![];

    let dir = mirror_dir(config);

    let path = Path::new(&dir);

    proxmox_sys::fs::scandir(
        libc::AT_FDCWD,
        path,
        &SNAPSHOT_REGEX,
        |_l2_fd, snapshot, file_type| {
            if file_type != nix::dir::Type::Directory {
                return Ok(());
            }

            list.push(snapshot.parse()?);

            Ok(())
        },
    )?;

    list.sort_unstable();

    Ok(list)
}

/// Create a new snapshot of the remote repository, fetching and storing files as needed.
///
/// Operates in three phases:
/// - Fetch and verify release files
/// - Fetch referenced indices according to config
/// - Fetch binary packages referenced by package indices
///
/// Files will be linked in a temporary directory and only renamed to the final, valid snapshot directory at the end. In case of error, leftover `XXX.tmp` directories at the top level of `base_dir` can be safely removed once the next snapshot has been successfully created, as they only contain hardlinks.
pub fn create_snapshot(
    config: MirrorConfig,
    snapshot: &Snapshot,
    subscription: Option<SubscriptionKey>,
    dry_run: bool,
) -> Result<(), Error> {
    let auth = if let Some(product) = &config.use_subscription {
        match subscription {
            None => {
                bail!(
                    "Mirror {} requires a subscription key, but none given.",
                    config.id
                );
            }
            Some(key) if key.product() == *product => {
                let base64 = base64::encode(format!("{}:{}", key.key, key.server_id));
                Some(format!("basic {base64}"))
            }
            Some(key) => {
                bail!(
                    "Repository product type '{}' and key product type '{}' don't match.",
                    product,
                    key.product()
                );
            }
        }
    } else {
        None
    };
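    // Illustrative note (hypothetical key values): for `key.key == "pom-key"` and
    // `key.server_id == "SERVERID"`, `auth` becomes
    // `Some(format!("basic {}", base64::encode("pom-key:SERVERID")))`, which
    // fetch_repo_file() then sends as the `Authorization` header.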

    let mut config: ParsedMirrorConfig = config.try_into()?;
    config.auth = auth;

    let prefix = format!("{snapshot}.tmp");
    let prefix = Path::new(&prefix);

    let mut total_progress = Progress::new();
    let mut warnings = Vec::new();

    let parse_release = |res: FetchResult, name: &str| -> Result<ReleaseFile, Error> {
        println!("Parsing {name}..");
        let parsed: ReleaseFile = res.data[..].try_into()?;
        println!(
            "'{name}' file has {} referenced files..",
            parsed.files.len()
        );
        Ok(parsed)
    };

    // we want both on-disk for compat reasons
    let res = fetch_release(&config, prefix, true, dry_run)?;
    total_progress.update(&res);
    let _release = parse_release(res, "Release")?;

    let res = fetch_release(&config, prefix, false, dry_run)?;
    total_progress.update(&res);
    let release = parse_release(res, "InRelease")?;

    let mut per_component = HashMap::new();
    let mut others = Vec::new();
    let binary = &config
        .repository
        .types
        .contains(&APTRepositoryPackageType::Deb);
    let source = &config
        .repository
        .types
        .contains(&APTRepositoryPackageType::DebSrc);

    for (basename, references) in &release.files {
        let reference = references.first();
        let reference = if let Some(reference) = reference {
            reference.clone()
        } else {
            continue;
        };
        let skip_components = !&config.repository.components.contains(&reference.component);

        let skip = skip_components
            || match &reference.file_type {
                FileReferenceType::Ignored => true,
                FileReferenceType::PDiff => true, // would require fetching the patches as well
                FileReferenceType::Sources(_) => !source,
                _ => {
                    if let Some(arch) = reference.file_type.architecture() {
                        !binary || !config.architectures.contains(arch)
                    } else {
                        false
                    }
                }
            };
        if skip {
            println!("Skipping {}", reference.path);
            others.push(reference);
        } else {
            let list = per_component
                .entry(reference.component)
                .or_insert_with(Vec::new);
            list.push(basename);
        }
    }
    println!();

    let mut indices_size = 0_usize;
    let mut total_count = 0;

    for (component, references) in &per_component {
        println!("Component '{component}'");

        let mut component_indices_size = 0;

        for basename in references {
            for reference in release.files.get(*basename).unwrap() {
                println!("\t{:?}: {:?}", reference.path, reference.file_type);
                component_indices_size += reference.size;
            }
        }
        indices_size += component_indices_size;

        let component_count = references.len();
        total_count += component_count;

        println!("Component references count: {component_count}");
        println!("Component indices size: {component_indices_size}");
        if references.is_empty() {
            println!("\tNo references found..");
        }
    }
    println!("Total indices count: {total_count}");
    println!("Total indices size: {indices_size}");

    if !others.is_empty() {
        println!("Skipped {} references", others.len());
    }
    println!();

    let mut packages_size = 0_usize;
    let mut packages_indices = HashMap::new();
    let mut failed_references = Vec::new();
    for (component, references) in per_component {
        println!("\nFetching indices for component '{component}'");
        let mut component_deb_size = 0;
        let mut fetch_progress = Progress::new();

        for basename in references {
            println!("\tFetching '{basename}'..");
            let files = release.files.get(basename).unwrap();
            let uncompressed_ref = files.iter().find(|reference| reference.path == *basename);

            let mut package_index_data = None;

            for reference in files {
                // if both compressed and uncompressed are referenced, the uncompressed file may not exist on the server
                if Some(reference) == uncompressed_ref && files.len() > 1 {
                    continue;
                }

                // this will ensure the uncompressed file will be written locally
                let res = match fetch_index_file(
                    &config,
                    prefix,
                    reference,
                    uncompressed_ref,
                    release.aquire_by_hash,
                    dry_run,
                ) {
                    Ok(res) => res,
                    Err(err) if !reference.file_type.is_package_index() => {
                        let msg = format!(
                            "Failed to fetch '{:?}' type reference '{}', skipping - {err}",
                            reference.file_type, reference.path
                        );
                        eprintln!("{msg}");
                        warnings.push(msg);
                        failed_references.push(reference);
                        continue;
                    }
                    Err(err) => return Err(err),
                };
                fetch_progress.update(&res);

                if package_index_data.is_none() && reference.file_type.is_package_index() {
                    package_index_data = Some(res.data());
                }
            }
            if let Some(data) = package_index_data {
                let packages: PackagesFile = data[..].try_into()?;
                let size: usize = packages.files.iter().map(|p| p.size).sum();
                println!("\t{} packages totalling {size}", packages.files.len());
                component_deb_size += size;

                packages_indices.entry(basename).or_insert(packages);
            }
            println!("Progress: {fetch_progress}");
        }
        println!("Total deb size for component: {component_deb_size}");
        packages_size += component_deb_size;
        total_progress += fetch_progress;
    }
    println!("Total deb size: {packages_size}");
    if !failed_references.is_empty() {
        eprintln!("Failed to download non-package-index references:");
        for reference in failed_references {
            eprintln!("\t{}", reference.path);
        }
    }

    println!("\nFetching packages..");
    let mut dry_run_progress = Progress::new();
    for (basename, references) in packages_indices {
        let total_files = references.files.len();
        if total_files == 0 {
            println!("\n{basename} - no files, skipping.");
            continue;
        } else {
            println!("\n{basename} - {total_files} total file(s)");
        }

        let mut fetch_progress = Progress::new();
        for package in references.files {
            let url = get_repo_url(&config.repository, &package.file);

            if dry_run {
                if config.pool.contains(&package.checksums) {
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: 0,
                    });
                } else {
                    println!("\t(dry-run) GET missing '{url}' ({}b)", package.size);
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: package.size,
                    });
                }
            } else {
                let mut full_path = PathBuf::from(prefix);
                full_path.push(&package.file);

                match fetch_plain_file(
                    &config,
                    &url,
                    &full_path,
                    package.size,
                    &package.checksums,
                    false,
                    dry_run,
                ) {
                    Ok(res) => fetch_progress.update(&res),
                    Err(err) if config.ignore_errors => {
                        let msg = format!(
                            "{}: failed to fetch package '{}' - {}",
                            basename, package.file, err,
                        );
                        eprintln!("{msg}");
                        warnings.push(msg);
                    }
                    Err(err) => return Err(err),
                }
            }

            if fetch_progress.file_count() % (max(total_files / 100, 1)) == 0 {
                println!("\tProgress: {fetch_progress}");
            }
        }
        println!("\tProgress: {fetch_progress}");
        if dry_run {
            dry_run_progress += fetch_progress;
        } else {
            total_progress += fetch_progress;
        }
    }

    if dry_run {
        println!("\nDry-run Stats (indices, downloaded but not persisted):\n{total_progress}");
        println!("\nDry-run stats (packages, new == missing):\n{dry_run_progress}");
    } else {
        println!("\nStats: {total_progress}");
    }

    if !warnings.is_empty() {
        eprintln!("Warnings:");
        for msg in warnings {
            eprintln!("- {msg}");
        }
    }

    if !dry_run {
        println!("\nRotating temp. snapshot in-place: {prefix:?} -> \"{snapshot}\"");
        let locked = config.pool.lock()?;
        locked.rename(prefix, Path::new(&format!("{snapshot}")))?;
    }

    Ok(())
}

/// Remove a snapshot by removing the corresponding snapshot directory. To actually free up space, a garbage collection needs to be run afterwards.
pub fn remove_snapshot(config: &MirrorConfig, snapshot: &Snapshot) -> Result<(), Error> {
    let pool: Pool = pool(config)?;
    let path = pool.get_path(Path::new(&snapshot.to_string()))?;

    pool.lock()?.remove_dir(&path)
}

/// Run a garbage collection on the underlying pool.
pub fn gc(config: &MirrorConfig) -> Result<(usize, u64), Error> {
    let pool: Pool = pool(config)?;

    pool.lock()?.gc()
}

/// Print differences between two snapshots
pub fn diff_snapshots(
    config: &MirrorConfig,
    snapshot: &Snapshot,
    other_snapshot: &Snapshot,
) -> Result<Diff, Error> {
    let pool = pool(config)?;
    pool.lock()?.diff_dirs(
        Path::new(&format!("{snapshot}")),
        Path::new(&format!("{other_snapshot}")),
    )
}
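
// Minimal usage sketch (illustrative only): how `MirrorConfig` and `Snapshot` values are
// constructed is up to the caller; the helper functions named below are assumptions, not
// part of this module.
//
//   let config: MirrorConfig = load_mirror_config()?;      // hypothetical helper
//   init(&config)?;                                         // create pool + link dir
//
//   let snapshots = list_snapshots(&config)?;
//   if let [.., prev, last] = &snapshots[..] {
//       let diff = diff_snapshots(&config, prev, last)?;    // compare two snapshots
//       // inspect `diff` (a `types::Diff`) as needed
//   }
//
//   // `create_snapshot` consumes the config; snapshot naming is up to the caller.
//   let snapshot: Snapshot = new_snapshot_name();           // hypothetical helper
//   create_snapshot(config, &snapshot, None, false)?;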