use std::cmp::max;
use std::collections::HashMap;
use std::io::Read;
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};
use flate2::bufread::GzDecoder;
use globset::{Glob, GlobSet, GlobSetBuilder};
use nix::libc;
use proxmox_http::{client::sync::Client, HttpClient, HttpOptions, ProxyConfig};
use proxmox_schema::{ApiType, Schema};
use proxmox_sys::fs::file_get_contents;

use crate::{
    config::{MirrorConfig, SkipConfig, SubscriptionKey, WeakCryptoConfig},
    convert_repo_line,
    helpers,
    pool::Pool,
    types::{Diff, Snapshot, SNAPSHOT_REGEX},
    FetchResult, Progress,
};

use proxmox_apt::{
    deb822::{
        CheckSums, CompressionType, FileReference, FileReferenceType, PackagesFile, ReleaseFile,
        SourcesFile,
    },
    repositories::{APTRepository, APTRepositoryPackageType},
};

fn mirror_dir(config: &MirrorConfig) -> PathBuf {
    PathBuf::from(&config.base_dir).join(&config.id)
}

pub(crate) fn pool(config: &MirrorConfig) -> Result<Pool, Error> {
    let pool_dir = PathBuf::from(&config.base_dir).join(".pool");
    Pool::open(&mirror_dir(config), &pool_dir)
}

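// For illustration (hypothetical values, not part of the original source): with `base_dir` set
// to "/srv/mirror" and a mirror `id` of "pbs", `mirror_dir()` resolves to "/srv/mirror/pbs",
// while the hardlink pool lives in "/srv/mirror/.pool", so mirrors configured with the same
// `base_dir` also share one pool.
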
/// `MirrorConfig`, but some fields converted/parsed into usable types.
struct ParsedMirrorConfig {
    pub repository: APTRepository,
    pub architectures: Vec<String>,
    pub pool: Pool,
    pub key: Vec<u8>,
    pub verify: bool,
    pub sync: bool,
    pub auth: Option<String>,
    pub client: Client,
    pub ignore_errors: bool,
    pub skip: SkipConfig,
    pub weak_crypto: WeakCryptoConfig,
}

impl TryInto<ParsedMirrorConfig> for MirrorConfig {
    type Error = anyhow::Error;

    fn try_into(self) -> Result<ParsedMirrorConfig, Self::Error> {
        let pool = pool(&self)?;

        let repository = convert_repo_line(self.repository.clone())?;

        let key = file_get_contents(Path::new(&self.key_path))?;

        let options = HttpOptions {
            user_agent: Some(
                concat!("proxmox-offline-mirror/", env!("CARGO_PKG_VERSION")).to_string(),
            ),
            proxy_config: ProxyConfig::from_proxy_env()?,
            ..Default::default()
        }; // TODO actually read version ;)

        let client = Client::new(options);

        let weak_crypto = match self.weak_crypto {
            Some(property_string) => {
                let value = (WeakCryptoConfig::API_SCHEMA as Schema)
                    .parse_property_string(&property_string)?;
                serde_json::from_value(value)?
            }
            None => WeakCryptoConfig::default(),
        };

        Ok(ParsedMirrorConfig {
            repository,
            architectures: self.architectures,
            pool,
            key,
            verify: self.verify,
            sync: self.sync,
            auth: None,
            client,
            ignore_errors: self.ignore_errors,
            skip: self.skip,
            weak_crypto,
        })
    }
}

// Helper to get absolute URL for dist-specific relative `path`.
fn get_dist_url(repo: &APTRepository, path: &str) -> String {
    let dist_root = format!("{}/dists/{}", repo.uris[0], repo.suites[0]);

    format!("{}/{}", dist_root, path)
}

// Helper to get dist-specific path given a `prefix` (snapshot dir) and relative `path`.
fn get_dist_path(repo: &APTRepository, prefix: &Path, path: &str) -> PathBuf {
    let mut base = PathBuf::from(prefix);
    base.push("dists");
    base.push(&repo.suites[0]);
    base.push(path);
    base
}

// Helper to get generic URL given a `repo` and `path`.
fn get_repo_url(repo: &APTRepository, path: &str) -> String {
    format!("{}/{}", repo.uris[0], path)
}

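// For illustration (hypothetical repository line, not part of the original source): for
// `deb https://deb.example.org/debian bullseye main`, `get_dist_url(&repo, "InRelease")` yields
// "https://deb.example.org/debian/dists/bullseye/InRelease" and `get_dist_path()` the matching
// path below the snapshot prefix, while `get_repo_url(&repo, "pool/main/f/foo/foo_1.0.deb")`
// resolves relative to the repository root instead of the dists directory.
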
/// Helper to fetch a file from a URI and optionally verify the response's checksum.
///
/// Only fetches and returns data, doesn't store anything anywhere.
fn fetch_repo_file(
    client: &Client,
    uri: &str,
    max_size: usize,
    checksums: Option<&CheckSums>,
    auth: Option<&str>,
) -> Result<FetchResult, Error> {
    println!("-> GET '{}'..", uri);

    let headers = if let Some(auth) = auth {
        let mut map = HashMap::new();
        map.insert("Authorization".to_string(), auth.to_string());
        Some(map)
    } else {
        None
    };

    let response = client.get(uri, headers.as_ref())?;

    let reader: Box<dyn Read> = response.into_body();
    let mut reader = reader.take(max_size as u64);
    let mut data = Vec::new();
    reader.read_to_end(&mut data)?;

    if let Some(checksums) = checksums {
        checksums.verify(&data)?;
    }

    Ok(FetchResult {
        fetched: data.len(),
        data,
    })
}

/// Helper to fetch the InRelease or Release/Release.gpg files from the repository.
///
/// Set `detached` to false to fetch InRelease, or to true to fetch Release/Release.gpg instead.
/// Verifies the contained/detached signature and stores all fetched files under `prefix`.
///
/// Returns the verified raw release file data, or None if the "fetch" part itself fails.
fn fetch_release(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    detached: bool,
    dry_run: bool,
) -> Result<Option<FetchResult>, Error> {
    let (name, fetched, sig) = if detached {
        println!("Fetching Release/Release.gpg files");
        let sig = match fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release.gpg"),
            // ... (size limit elided)
            None,
            config.auth.as_deref(),
        ) {
            Ok(res) => res,
            Err(err) => {
                eprintln!("Release.gpg fetch failure: {err}");
                return Ok(None);
            }
        };

        let mut fetched = match fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release"),
            // ... (size limit elided)
            None,
            config.auth.as_deref(),
        ) {
            Ok(res) => res,
            Err(err) => {
                eprintln!("Release fetch failure: {err}");
                return Ok(None);
            }
        };

        fetched.fetched += sig.fetched;
        ("Release(.gpg)", fetched, Some(sig.data()))
    } else {
        println!("Fetching InRelease file");
        let fetched = match fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "InRelease"),
            // ... (size limit elided)
            None,
            config.auth.as_deref(),
        ) {
            Ok(res) => res,
            Err(err) => {
                eprintln!("InRelease fetch failure: {err}");
                return Ok(None);
            }
        };

        ("InRelease", fetched, None)
    };

    println!("Verifying '{name}' signature using provided repository key..");
    let content = fetched.data_ref();
    helpers::verify_signature(content, &config.key, sig.as_deref(), &config.weak_crypto)?;

    let sha512 = Some(openssl::sha::sha512(content));
    let csums = CheckSums {
        sha512,
        ..Default::default()
    };

    if dry_run {
        return Ok(Some(FetchResult {
            data: content.to_vec(),
            fetched: fetched.fetched,
        }));
    }

    let locked = &config.pool.lock()?;

    if !locked.contains(&csums) {
        locked.add_file(content, &csums, config.sync)?;
    }

    if detached {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release")),
        )?;
        let sig = sig.unwrap();
        let sha512 = Some(openssl::sha::sha512(&sig));
        let csums = CheckSums {
            sha512,
            ..Default::default()
        };
        if !locked.contains(&csums) {
            locked.add_file(&sig, &csums, config.sync)?;
        }
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release.gpg")),
        )?;
    } else {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "InRelease")),
        )?;
    }

    Ok(Some(FetchResult {
        data: content.to_vec(),
        fetched: fetched.fetched,
    }))
}

/// Helper to fetch an index file referenced by a `ReleaseFile`.
///
/// Since these usually come in compressed and uncompressed form, with the latter often not
/// actually existing in the source repository as a file, this fetches the data and, if necessary,
/// decompresses it to obtain a copy of the uncompressed data.
/// Will skip fetching if both references are already available with the expected checksum in the
/// pool, in which case they will just be re-linked under the new path.
///
/// Returns the uncompressed data.
fn fetch_index_file(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    reference: &FileReference,
    uncompressed: Option<&FileReference>,
    by_hash: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let url = get_dist_url(&config.repository, &reference.path);
    let path = get_dist_path(&config.repository, prefix, &reference.path);

    if let Some(uncompressed) = uncompressed {
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);

        if config.pool.contains(&reference.checksums)
            && config.pool.contains(&uncompressed.checksums)
        {
            let data = config
                .pool
                .get_contents(&uncompressed.checksums, config.verify)?;

            if dry_run {
                return Ok(FetchResult { data, fetched: 0 });
            }
            // Ensure they're linked at current path
            config.pool.lock()?.link_file(&reference.checksums, &path)?;
            config
                .pool
                .lock()?
                .link_file(&uncompressed.checksums, &uncompressed_path)?;
            return Ok(FetchResult { data, fetched: 0 });
        }
    }

    let urls = if by_hash {
        let mut urls = Vec::new();
        if let Some((base_url, _file_name)) = url.rsplit_once('/') {
            if let Some(sha512) = reference.checksums.sha512 {
                urls.push(format!("{base_url}/by-hash/SHA512/{}", hex::encode(sha512)));
            }
            if let Some(sha256) = reference.checksums.sha256 {
                urls.push(format!("{base_url}/by-hash/SHA256/{}", hex::encode(sha256)));
            }
        }
        urls.push(url);
        urls
    } else {
        vec![url]
    };

    let res = urls
        .iter()
        .fold(None, |res, url| match res {
            Some(Ok(res)) => Some(Ok(res)),
            _ => Some(fetch_plain_file(
                config,
                url,
                &path,
                reference.size,
                &reference.checksums,
                true,
                dry_run,
            )),
        })
        .ok_or_else(|| format_err!("Failed to retrieve {}", reference.path))??;

    let mut buf = Vec::new();
    let raw = res.data_ref();

    let decompressed = match reference.file_type.compression() {
        None => raw,
        Some(CompressionType::Gzip) => {
            let mut gz = GzDecoder::new(raw);
            gz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Bzip2) => {
            let mut bz = bzip2::read::BzDecoder::new(raw);
            bz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Lzma) | Some(CompressionType::Xz) => {
            let mut xz = xz2::read::XzDecoder::new_multi_decoder(raw);
            xz.read_to_end(&mut buf)?;
            &buf[..]
        }
    };

    let res = FetchResult {
        data: decompressed.to_owned(),
        fetched: res.fetched,
    };

    if dry_run {
        return Ok(res);
    }

    let locked = &config.pool.lock()?;
    if let Some(uncompressed) = uncompressed {
        if !locked.contains(&uncompressed.checksums) {
            locked.add_file(decompressed, &uncompressed.checksums, config.sync)?;
        }

        // Ensure it's linked at current path
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);
        locked.link_file(&uncompressed.checksums, &uncompressed_path)?;
    }

    Ok(res)
}

/// Helper to fetch arbitrary files like binary packages.
///
/// Will skip fetching if a matching file already exists locally, in which case it will just be
/// re-linked under the new path.
///
/// If `need_data` is false and the mirror config is set to skip verification, reading the file's
/// content will be skipped as well if fetching was skipped.
fn fetch_plain_file(
    config: &ParsedMirrorConfig,
    url: &str,
    file: &Path,
    max_size: usize,
    checksums: &CheckSums,
    need_data: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let locked = &config.pool.lock()?;
    let res = if locked.contains(checksums) {
        if need_data || config.verify {
            locked
                .get_contents(checksums, config.verify)
                .map(|data| FetchResult { data, fetched: 0 })?
        } else {
            // performance optimization for .deb files if verify is false
            // we never need the file contents and they make up the bulk of a repo
            FetchResult {
                data: vec![],
                fetched: 0,
            }
        }
    } else if dry_run && !need_data {
        FetchResult {
            data: vec![],
            fetched: 0,
        }
    } else {
        let fetched = fetch_repo_file(
            &config.client,
            url,
            max_size,
            Some(checksums),
            config.auth.as_deref(),
        )?;
        locked.add_file(fetched.data_ref(), checksums, config.verify)?;
        fetched
    };

    if !dry_run {
        // Ensure it's linked at current path
        locked.link_file(checksums, file)?;
    }

    Ok(res)
}

/// Initialize a new mirror (by creating the corresponding pool).
pub fn init(config: &MirrorConfig) -> Result<(), Error> {
    let pool_dir = PathBuf::from(&config.base_dir).join(".pool");

    let dir = mirror_dir(config);

    Pool::create(&dir, &pool_dir)?;

    Ok(())
}

/// Destroy a mirror (by destroying the corresponding pool's link dir followed by GC).
pub fn destroy(config: &MirrorConfig) -> Result<(), Error> {
    let pool: Pool = pool(config)?;
    pool.lock()?.destroy()?;

    Ok(())
}

pub fn list_snapshots(config: &MirrorConfig) -> Result<Vec<Snapshot>, Error> {
    let _pool: Pool = pool(config)?;

    let mut list: Vec<Snapshot> = vec![];

    let path = mirror_dir(config);

    proxmox_sys::fs::scandir(
        libc::AT_FDCWD,
        &path,
        &SNAPSHOT_REGEX,
        |_l2_fd, snapshot, file_type| {
            if file_type != nix::dir::Type::Directory {
                return Ok(());
            }

            list.push(snapshot.parse()?);

            Ok(())
        },
    )?;

    list.sort_unstable();

    Ok(list)
}

struct MirrorProgress {
    warnings: Vec<String>,
    skip_count: usize,
    skip_bytes: usize,
    dry_run: Progress,
    total: Progress,
}

fn convert_to_globset(config: &ParsedMirrorConfig) -> Result<Option<GlobSet>, Error> {
    Ok(if let Some(skipped_packages) = &config.skip.skip_packages {
        let mut globs = GlobSetBuilder::new();
        for glob in skipped_packages {
            let glob = Glob::new(glob)?;
            globs.add(glob);
        }
        let globs = globs.build()?;
        Some(globs)
    } else {
        None
    })
}

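// For illustration (hypothetical skip configuration, not part of the original source): with
// `skip_packages = ["linux-image-*", "zfs*"]` the returned `GlobSet` matches package names such
// as "linux-image-5.15.30-2-amd64"; the fetch loops below use `GlobSet::matches()` to look up
// which pattern(s) matched so they can be printed when a package is skipped.
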
fn fetch_binary_packages(
    config: &ParsedMirrorConfig,
    component: &str,
    packages_indices: HashMap<&String, PackagesFile>,
    dry_run: bool,
    prefix: &Path,
    progress: &mut MirrorProgress,
) -> Result<(), Error> {
    let skipped_package_globs = convert_to_globset(config)?;

    for (basename, references) in packages_indices {
        let total_files = references.files.len();
        if total_files == 0 {
            println!("\n{basename} - no files, skipping.");
            continue;
        } else {
            println!("\n{basename} - {total_files} total file(s)");
        }

        let mut fetch_progress = Progress::new();
        let mut skip_count = 0usize;
        let mut skip_bytes = 0usize;

        for package in references.files {
            if let Some(ref sections) = &config.skip.skip_sections {
                if sections.iter().any(|section| {
                    package.section == *section
                        || package.section == format!("{component}/{section}")
                }) {
                    println!(
                        "\tskipping {} - {}b (section '{}')",
                        package.package, package.size, package.section
                    );
                    skip_count += 1;
                    skip_bytes += package.size;
                    continue;
                }
            }

            if let Some(skipped_package_globs) = &skipped_package_globs {
                let matches = skipped_package_globs.matches(&package.package);
                if !matches.is_empty() {
                    // safety, skipped_package_globs is set based on this
                    let globs = config.skip.skip_packages.as_ref().unwrap();
                    let matches: Vec<String> = matches.iter().map(|i| globs[*i].clone()).collect();
                    println!(
                        "\tskipping {} - {}b (package glob(s): {})",
                        package.package,
                        package.size,
                        matches.join(", ")
                    );
                    skip_count += 1;
                    skip_bytes += package.size;
                    continue;
                }
            }

            let url = get_repo_url(&config.repository, &package.file);

            if dry_run {
                if config.pool.contains(&package.checksums) {
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: 0,
                    });
                } else {
                    println!("\t(dry-run) GET missing '{url}' ({}b)", package.size);
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: package.size,
                    });
                }
            } else {
                let mut full_path = PathBuf::from(prefix);
                full_path.push(&package.file);

                match fetch_plain_file(
                    config,
                    &url,
                    &full_path,
                    package.size,
                    &package.checksums,
                    false,
                    dry_run,
                ) {
                    Ok(res) => fetch_progress.update(&res),
                    Err(err) if config.ignore_errors => {
                        let msg = format!(
                            "{}: failed to fetch package '{}' - {}",
                            basename, package.file, err,
                        );
                        eprintln!("{msg}");
                        progress.warnings.push(msg);
                    }
                    Err(err) => return Err(err),
                }
            }

            if fetch_progress.file_count() % (max(total_files / 100, 1)) == 0 {
                println!("\tProgress: {fetch_progress}");
            }
        }

        println!("\tProgress: {fetch_progress}");
        if dry_run {
            progress.dry_run += fetch_progress;
        } else {
            progress.total += fetch_progress;
        }
        progress.skip_count += skip_count;
        progress.skip_bytes += skip_bytes;
        println!("Skipped downloading {skip_count} packages totalling {skip_bytes}b");
    }

    Ok(())
}

fn fetch_source_packages(
    config: &ParsedMirrorConfig,
    component: &str,
    source_packages_indices: HashMap<&String, SourcesFile>,
    dry_run: bool,
    prefix: &Path,
    progress: &mut MirrorProgress,
) -> Result<(), Error> {
    let skipped_package_globs = convert_to_globset(config)?;

    for (basename, references) in source_packages_indices {
        let total_source_packages = references.source_packages.len();
        if total_source_packages == 0 {
            println!("\n{basename} - no files, skipping.");
            continue;
        } else {
            println!("\n{basename} - {total_source_packages} total source package(s)");
        }

        let mut fetch_progress = Progress::new();
        let mut skip_count = 0usize;
        let mut skip_bytes = 0usize;

        for package in references.source_packages {
            if let Some(ref sections) = &config.skip.skip_sections {
                if sections.iter().any(|section| {
                    package.section.as_ref() == Some(section)
                        || package.section == Some(format!("{component}/{section}"))
                }) {
                    println!(
                        "\tskipping {} - {}b (section '{}')",
                        package.package,
                        package.size(),
                        package.section.as_ref().unwrap(),
                    );
                    skip_count += 1;
                    skip_bytes += package.size();
                    continue;
                }
            }

            if let Some(skipped_package_globs) = &skipped_package_globs {
                let matches = skipped_package_globs.matches(&package.package);
                if !matches.is_empty() {
                    // safety, skipped_package_globs is set based on this
                    let globs = config.skip.skip_packages.as_ref().unwrap();
                    let matches: Vec<String> = matches.iter().map(|i| globs[*i].clone()).collect();
                    println!(
                        "\tskipping {} - {}b (package glob(s): {})",
                        package.package,
                        package.size(),
                        matches.join(", ")
                    );
                    skip_count += 1;
                    skip_bytes += package.size();
                    continue;
                }
            }

            for file_reference in package.files.values() {
                let path = format!("{}/{}", package.directory, file_reference.file);
                let url = get_repo_url(&config.repository, &path);

                if dry_run {
                    if config.pool.contains(&file_reference.checksums) {
                        fetch_progress.update(&FetchResult {
                            data: vec![],
                            fetched: 0,
                        });
                    } else {
                        println!("\t(dry-run) GET missing '{url}' ({}b)", file_reference.size);
                        fetch_progress.update(&FetchResult {
                            data: vec![],
                            fetched: file_reference.size,
                        });
                    }
                } else {
                    let mut full_path = PathBuf::from(prefix);
                    full_path.push(&path);

                    match fetch_plain_file(
                        config,
                        &url,
                        &full_path,
                        file_reference.size,
                        &file_reference.checksums,
                        false,
                        dry_run,
                    ) {
                        Ok(res) => fetch_progress.update(&res),
                        Err(err) if config.ignore_errors => {
                            let msg = format!(
                                "{}: failed to fetch package '{}' - {}",
                                basename, file_reference.file, err,
                            );
                            eprintln!("{msg}");
                            progress.warnings.push(msg);
                        }
                        Err(err) => return Err(err),
                    }
                }
            }

            if fetch_progress.file_count() % (max(total_source_packages / 100, 1)) == 0 {
                println!("\tProgress: {fetch_progress}");
            }
        }

        println!("\tProgress: {fetch_progress}");
        if dry_run {
            progress.dry_run += fetch_progress;
        } else {
            progress.total += fetch_progress;
        }
        progress.skip_count += skip_count;
        progress.skip_bytes += skip_bytes;
        println!("Skipped downloading {skip_count} packages totalling {skip_bytes}b");
    }

    Ok(())
}

/// Create a new snapshot of the remote repository, fetching and storing files as needed.
///
/// Operates in three phases:
/// - Fetch and verify release files
/// - Fetch referenced indices according to config
/// - Fetch binary packages referenced by package indices
///
/// Files will be linked in a temporary directory and only renamed to the final, valid snapshot
/// directory at the end. In case of error, leftover `XXX.tmp` directories at the top level of
/// `base_dir` can be safely removed once the next snapshot has been created successfully, as they
/// only contain hardlinks.
pub fn create_snapshot(
    config: MirrorConfig,
    snapshot: &Snapshot,
    subscription: Option<SubscriptionKey>,
    dry_run: bool,
) -> Result<(), Error> {
    let auth = if let Some(product) = &config.use_subscription {
        match subscription {
            None => {
                bail!(
                    "Mirror {} requires a subscription key, but none given.",
                    config.id
                );
            }
            Some(key) if key.product() == *product => {
                let base64 = base64::encode(format!("{}:{}", key.key, key.server_id));
                Some(format!("basic {base64}"))
            }
            Some(key) => {
                bail!(
                    "Repository product type '{}' and key product type '{}' don't match.",
                    product,
                    key.product()
                );
            }
        }
    } else {
        None
    };

    let mut config: ParsedMirrorConfig = config.try_into()?;
    config.auth = auth;

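    // For illustration (hypothetical values, not part of the original source): a key
    // "pom-example-0123" registered to server ID "ABC123" ends up as the HTTP header
    // `Authorization: basic <base64("pom-example-0123:ABC123")>` on every request of this run.
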
    let prefix = format!("{snapshot}.tmp");
    let prefix = Path::new(&prefix);

    let mut progress = MirrorProgress {
        warnings: Vec::new(),
        skip_count: 0,
        skip_bytes: 0,
        dry_run: Progress::new(),
        total: Progress::new(),
    };

    let parse_release = |res: FetchResult, name: &str| -> Result<ReleaseFile, Error> {
        println!("Parsing {name}..");
        let parsed: ReleaseFile = res.data[..].try_into()?;
        println!(
            "'{name}' file has {} referenced files..",
            parsed.files.len()
        );
        Ok(parsed)
    };

    // we want both on-disk for compat reasons, if both are available
    let release = fetch_release(&config, prefix, true, dry_run)?
        .map(|res| {
            progress.total.update(&res);
            parse_release(res, "Release")
        })
        .transpose()?;

    let in_release = fetch_release(&config, prefix, false, dry_run)?
        .map(|res| {
            progress.total.update(&res);
            parse_release(res, "InRelease")
        })
        .transpose()?;

    // at least one must be available to proceed
    let release = release
        .or(in_release)
        .ok_or_else(|| format_err!("Neither Release(.gpg) nor InRelease available!"))?;

    let mut per_component = HashMap::new();
    let mut others = Vec::new();

    let binary = config
        .repository
        .types
        .contains(&APTRepositoryPackageType::Deb);
    let source = config
        .repository
        .types
        .contains(&APTRepositoryPackageType::DebSrc);

    for (basename, references) in &release.files {
        let reference = references.first();
        let reference = if let Some(reference) = reference {
            reference.clone()
        } else {
            continue;
        };

        let skip_components = !&config.repository.components.contains(&reference.component);

        let skip = skip_components
            || match &reference.file_type {
                FileReferenceType::Ignored => true,
                FileReferenceType::PDiff => true, // would require fetching the patches as well
                FileReferenceType::Sources(_) => !source,
                _ => {
                    if let Some(arch) = reference.file_type.architecture() {
                        !binary || !config.architectures.contains(arch)
                    } else {
                        false
                    }
                }
            };

        if skip {
            println!("Skipping {}", reference.path);
            others.push(reference);
        } else {
            let list = per_component
                .entry(reference.component)
                .or_insert_with(Vec::new);
            list.push(basename);
        }
    }

    let mut indices_size = 0_usize;
    let mut total_count = 0;

    for (component, references) in &per_component {
        println!("Component '{component}'");

        let mut component_indices_size = 0;

        for basename in references {
            for reference in release.files.get(*basename).unwrap() {
                println!("\t{:?}: {:?}", reference.path, reference.file_type);
                component_indices_size += reference.size;
            }
        }
        indices_size += component_indices_size;

        let component_count = references.len();
        total_count += component_count;

        println!("Component references count: {component_count}");
        println!("Component indices size: {component_indices_size}");
        if references.is_empty() {
            println!("\tNo references found..");
        }
    }
    println!("Total indices count: {total_count}");
    println!("Total indices size: {indices_size}");

    if !others.is_empty() {
        println!("Skipped {} references", others.len());
    }

    let mut packages_size = 0_usize;
    #[allow(clippy::type_complexity)]
    let mut per_component_indices: HashMap<
        String,
        (
            HashMap<&String, PackagesFile>,
            HashMap<&String, SourcesFile>,
        ),
    > = HashMap::new();

    let mut failed_references = Vec::new();
    for (component, references) in per_component {
        println!("\nFetching indices for component '{component}'");
        let mut component_deb_size = 0;
        let mut component_dsc_size = 0;

        let mut fetch_progress = Progress::new();

        let (packages_indices, source_packages_indices) =
            per_component_indices.entry(component.clone()).or_default();

        for basename in references {
            println!("\tFetching '{basename}'..");
            let files = release.files.get(basename).unwrap();
            let uncompressed_ref = files.iter().find(|reference| reference.path == *basename);

            let mut package_index_data = None;

            for reference in files {
                // if both compressed and uncompressed are referenced, the uncompressed file may
                // not exist on the server
                if Some(reference) == uncompressed_ref && files.len() > 1 {
                    continue;
                }

                // this will ensure the uncompressed file will be written locally
                let res = match fetch_index_file(
                    &config,
                    prefix,
                    reference,
                    uncompressed_ref,
                    release.aquire_by_hash,
                    dry_run,
                ) {
                    Ok(res) => res,
                    Err(err) if !reference.file_type.is_package_index() => {
                        let msg = format!(
                            "Failed to fetch '{:?}' type reference '{}', skipping - {err}",
                            reference.file_type, reference.path
                        );
                        eprintln!("{msg}");
                        progress.warnings.push(msg);
                        failed_references.push(reference);
                        continue;
                    }
                    Err(err) => return Err(err),
                };
                fetch_progress.update(&res);

                if package_index_data.is_none() && reference.file_type.is_package_index() {
                    package_index_data = Some((&reference.file_type, res.data()));
                }
            }

            if let Some((reference_type, data)) = package_index_data {
                match reference_type {
                    FileReferenceType::Packages(_, _) => {
                        let packages: PackagesFile = data[..].try_into()?;
                        let size: usize = packages.files.iter().map(|p| p.size).sum();
                        println!("\t{} packages totalling {size}", packages.files.len());
                        component_deb_size += size;

                        packages_indices.entry(basename).or_insert(packages);
                    }
                    FileReferenceType::Sources(_) => {
                        let source_packages: SourcesFile = data[..].try_into()?;
                        let size: usize = source_packages
                            .source_packages
                            .iter()
                            .map(|s| s.size())
                            .sum();
                        println!(
                            "\t{} source packages totalling {size}",
                            source_packages.source_packages.len()
                        );
                        component_dsc_size += size;
                        source_packages_indices
                            .entry(basename)
                            .or_insert(source_packages);
                    }
                    unknown => {
                        eprintln!("Unknown package index '{unknown:?}', skipping processing..")
                    }
                }
            }
            println!("Progress: {fetch_progress}");
        }

        println!("Total deb size for component: {component_deb_size}");
        packages_size += component_deb_size;

        println!("Total dsc size for component: {component_dsc_size}");
        packages_size += component_dsc_size;

        progress.total += fetch_progress;
    }
    println!("Total deb size: {packages_size}");
    if !failed_references.is_empty() {
        eprintln!("Failed to download non-package-index references:");
        for reference in failed_references {
            eprintln!("\t{}", reference.path);
        }
    }

    for (component, (packages_indices, source_packages_indices)) in per_component_indices {
        println!("\nFetching {component} packages..");
        fetch_binary_packages(
            &config,
            &component,
            packages_indices,
            dry_run,
            prefix,
            &mut progress,
        )?;

        fetch_source_packages(
            &config,
            &component,
            source_packages_indices,
            dry_run,
            prefix,
            &mut progress,
        )?;
    }

    if dry_run {
        println!(
            "\nDry-run Stats (indices, downloaded but not persisted):\n{}",
            progress.total
        );
        println!(
            "\nDry-run stats (packages, new == missing):\n{}",
            progress.dry_run
        );
    } else {
        println!("\nStats: {}", progress.total);
    }

    if total_count > 0 {
        println!(
            "Skipped downloading {} packages totalling {}b",
            progress.skip_count, progress.skip_bytes,
        );
    }

    if !progress.warnings.is_empty() {
        eprintln!("Warnings:");
        for msg in progress.warnings {
            eprintln!("- {msg}");
        }
    }

    println!("\nRotating temp. snapshot in-place: {prefix:?} -> \"{snapshot}\"");
    let locked = config.pool.lock()?;
    locked.rename(prefix, Path::new(&format!("{snapshot}")))?;

    Ok(())
}

/// Remove a snapshot by removing the corresponding snapshot directory. To actually free up space,
/// a garbage collection needs to be run afterwards.
pub fn remove_snapshot(config: &MirrorConfig, snapshot: &Snapshot) -> Result<(), Error> {
    let pool: Pool = pool(config)?;
    let path = pool.get_path(Path::new(&snapshot.to_string()))?;

    pool.lock()?.remove_dir(&path)
}

/// Run a garbage collection on the underlying pool.
pub fn gc(config: &MirrorConfig) -> Result<(usize, u64), Error> {
    let pool: Pool = pool(config)?;
    pool.lock()?.gc()
}

/// Return the differences between two snapshots.
pub fn diff_snapshots(
    config: &MirrorConfig,
    snapshot: &Snapshot,
    other_snapshot: &Snapshot,
) -> Result<Diff, Error> {
    let pool = pool(config)?;
    pool.lock()?.diff_dirs(
        Path::new(&format!("{snapshot}")),
        Path::new(&format!("{other_snapshot}")),
    )
}