]>
Commit | Line | Data |
---|---|---|
9ecde319 FG |
1 | use std::{ |
2 | cmp::max, | |
3 | collections::HashMap, | |
4 | io::Read, | |
5 | path::{Path, PathBuf}, | |
6 | }; | |
7 | ||
8b267808 | 8 | use anyhow::{bail, format_err, Error}; |
9ecde319 | 9 | use flate2::bufread::GzDecoder; |
e79308e6 | 10 | use globset::{Glob, GlobSetBuilder}; |
d035ecb5 | 11 | use nix::libc; |
49997188 | 12 | use proxmox_http::{client::sync::Client, HttpClient, HttpOptions}; |
d035ecb5 FG |
13 | use proxmox_sys::fs::file_get_contents; |
14 | ||
15 | use crate::{ | |
e79308e6 | 16 | config::{MirrorConfig, SkipConfig, SubscriptionKey}, |
d035ecb5 FG |
17 | convert_repo_line, |
18 | pool::Pool, | |
529111dc | 19 | types::{Diff, Snapshot, SNAPSHOT_REGEX}, |
d035ecb5 FG |
20 | FetchResult, Progress, |
21 | }; | |
9ecde319 FG |
22 | use proxmox_apt::{ |
23 | deb822::{ | |
24 | CheckSums, CompressionType, FileReference, FileReferenceType, PackagesFile, ReleaseFile, | |
25 | }, | |
26 | repositories::{APTRepository, APTRepositoryPackageType}, | |
27 | }; | |
28 | ||
29 | use crate::helpers; | |
30 | ||
c598cb15 FG |
31 | fn mirror_dir(config: &MirrorConfig) -> String { |
32 | format!("{}/{}", config.base_dir, config.id) | |
33 | } | |
34 | ||
d035ecb5 | 35 | pub(crate) fn pool(config: &MirrorConfig) -> Result<Pool, Error> { |
c598cb15 FG |
36 | let pool_dir = format!("{}/.pool", config.base_dir); |
37 | Pool::open(Path::new(&mirror_dir(config)), Path::new(&pool_dir)) | |
d035ecb5 FG |
38 | } |
39 | ||
/// `MirrorConfig`, but some fields converted/parsed into usable types.
struct ParsedMirrorConfig {
    /// Parsed APT repository definition (single repo line).
    pub repository: APTRepository,
    /// Architectures to mirror (e.g. "amd64").
    pub architectures: Vec<String>,
    /// Opened pool backing this mirror.
    pub pool: Pool,
    /// Raw contents of the repository signing key file.
    pub key: Vec<u8>,
    /// Whether to verify checksums when reading back pooled files.
    pub verify: bool,
    /// Whether to fsync when adding files to the pool.
    pub sync: bool,
    /// Optional HTTP `Authorization` header value (set by callers with subscription info).
    pub auth: Option<String>,
    /// HTTP client used for all fetches.
    pub client: Client,
    /// Whether to continue on package download errors instead of aborting.
    pub ignore_errors: bool,
    /// Filters for sections/packages that should not be mirrored.
    pub skip: SkipConfig,
}
53 | ||
54 | impl TryInto<ParsedMirrorConfig> for MirrorConfig { | |
55 | type Error = anyhow::Error; | |
56 | ||
57 | fn try_into(self) -> Result<ParsedMirrorConfig, Self::Error> { | |
58 | let pool = pool(&self)?; | |
59 | ||
60 | let repository = convert_repo_line(self.repository.clone())?; | |
61 | ||
62 | let key = file_get_contents(Path::new(&self.key_path))?; | |
63 | ||
49997188 FG |
64 | let options = HttpOptions { |
65 | user_agent: Some("proxmox-offline-mirror 0.1".to_string()), | |
66 | ..Default::default() | |
67 | }; // TODO actually read version ;) | |
68 | ||
69 | let client = Client::new(options); | |
8b267808 | 70 | |
d035ecb5 FG |
71 | Ok(ParsedMirrorConfig { |
72 | repository, | |
73 | architectures: self.architectures, | |
74 | pool, | |
75 | key, | |
76 | verify: self.verify, | |
77 | sync: self.sync, | |
8b267808 | 78 | auth: None, |
49997188 | 79 | client, |
96a80415 | 80 | ignore_errors: self.ignore_errors, |
e79308e6 | 81 | skip: self.skip, |
d035ecb5 FG |
82 | }) |
83 | } | |
84 | } | |
85 | ||
2d13dcfc | 86 | // Helper to get absolute URL for dist-specific relative `path`. |
9ecde319 FG |
87 | fn get_dist_url(repo: &APTRepository, path: &str) -> String { |
88 | let dist_root = format!("{}/dists/{}", repo.uris[0], repo.suites[0]); | |
89 | ||
90 | format!("{}/{}", dist_root, path) | |
91 | } | |
92 | ||
2d13dcfc | 93 | // Helper to get dist-specific path given a `prefix` (snapshot dir) and relative `path`. |
9ecde319 FG |
94 | fn get_dist_path(repo: &APTRepository, prefix: &Path, path: &str) -> PathBuf { |
95 | let mut base = PathBuf::from(prefix); | |
96 | base.push("dists"); | |
97 | base.push(&repo.suites[0]); | |
98 | base.push(path); | |
99 | base | |
100 | } | |
101 | ||
2d13dcfc | 102 | // Helper to get generic URL given a `repo` and `path`. |
9ecde319 FG |
103 | fn get_repo_url(repo: &APTRepository, path: &str) -> String { |
104 | format!("{}/{}", repo.uris[0], path) | |
105 | } | |
106 | ||
2d13dcfc FG |
107 | /// Helper to fetch file from URI and optionally verify the responses checksum. |
108 | /// | |
109 | /// Only fetches and returns data, doesn't store anything anywhere. | |
9ecde319 | 110 | fn fetch_repo_file( |
49997188 | 111 | client: &Client, |
9ecde319 | 112 | uri: &str, |
d7e210ac | 113 | max_size: usize, |
9ecde319 | 114 | checksums: Option<&CheckSums>, |
8b267808 | 115 | auth: Option<&str>, |
9ecde319 FG |
116 | ) -> Result<FetchResult, Error> { |
117 | println!("-> GET '{}'..", uri); | |
118 | ||
49997188 FG |
119 | let headers = if let Some(auth) = auth { |
120 | let mut map = HashMap::new(); | |
121 | map.insert("Authorization".to_string(), auth.to_string()); | |
122 | Some(map) | |
8b267808 | 123 | } else { |
49997188 | 124 | None |
8b267808 FG |
125 | }; |
126 | ||
49997188 | 127 | let response = client.get(uri, headers.as_ref())?; |
9ecde319 | 128 | |
49997188 | 129 | let reader: Box<dyn Read> = response.into_body(); |
d7e210ac | 130 | let mut reader = reader.take(max_size as u64); |
9ecde319 | 131 | let mut data = Vec::new(); |
49997188 | 132 | reader.read_to_end(&mut data)?; |
9ecde319 FG |
133 | |
134 | if let Some(checksums) = checksums { | |
135 | checksums.verify(&data)?; | |
136 | } | |
137 | ||
138 | Ok(FetchResult { | |
49997188 | 139 | fetched: data.len(), |
9ecde319 | 140 | data, |
9ecde319 FG |
141 | }) |
142 | } | |
143 | ||
/// Helper to fetch InRelease (`detached` == false) or Release/Release.gpg (`detached` == true) files from repository.
///
/// Verifies the contained/detached signature, stores all fetched files under `prefix`, and returns the verified raw release file data.
fn fetch_release(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    detached: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    // Fetch the release file (plus detached signature if requested),
    // accumulating the total fetched byte count into `fetched`.
    let (name, fetched, sig) = if detached {
        println!("Fetching Release/Release.gpg files");
        let sig = fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release.gpg"),
            1024 * 1024,
            None,
            config.auth.as_deref(),
        )?;
        let mut fetched = fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "Release"),
            256 * 1024 * 1024,
            None,
            config.auth.as_deref(),
        )?;
        fetched.fetched += sig.fetched;
        ("Release(.gpg)", fetched, Some(sig.data()))
    } else {
        println!("Fetching InRelease file");
        let fetched = fetch_repo_file(
            &config.client,
            &get_dist_url(&config.repository, "InRelease"),
            256 * 1024 * 1024,
            None,
            config.auth.as_deref(),
        )?;
        ("InRelease", fetched, None)
    };

    // Verify the (inline or detached) signature before trusting any content.
    println!("Verifying '{name}' signature using provided repository key..");
    let content = fetched.data_ref();
    let verified = helpers::verify_signature(content, &config.key, sig.as_deref())?;
    println!("Success");

    // Index the release file in the pool by its SHA512 only.
    let sha512 = Some(openssl::sha::sha512(content));
    let csums = CheckSums {
        sha512,
        ..Default::default()
    };

    // Dry-run: return verified data without persisting anything to the pool.
    if dry_run {
        return Ok(FetchResult {
            data: verified,
            fetched: fetched.fetched,
        });
    }

    let locked = &config.pool.lock()?;

    // Add the release file to the pool if not already present.
    if !locked.contains(&csums) {
        locked.add_file(content, &csums, config.sync)?;
    }

    // Link the pooled file(s) under the snapshot-specific dist path(s).
    if detached {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release")),
        )?;
        let sig = sig.unwrap(); // safe: `detached` branch above always sets `sig`
        let sha512 = Some(openssl::sha::sha512(&sig));
        let csums = CheckSums {
            sha512,
            ..Default::default()
        };
        if !locked.contains(&csums) {
            locked.add_file(&sig, &csums, config.sync)?;
        }
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "Release.gpg")),
        )?;
    } else {
        locked.link_file(
            &csums,
            Path::new(&get_dist_path(&config.repository, prefix, "InRelease")),
        )?;
    }

    Ok(FetchResult {
        data: verified,
        fetched: fetched.fetched,
    })
}
237 | ||
/// Helper to fetch an index file referenced by a `ReleaseFile`.
///
/// Since these usually come in compressed and uncompressed form, with the latter often not actually existing in the source repository as file, this fetches and if necessary decompresses to obtain a copy of the uncompressed data.
/// Will skip fetching if both references are already available with the expected checksum in the pool, in which case they will just be re-linked under the new path.
///
/// Returns the uncompressed data.
fn fetch_index_file(
    config: &ParsedMirrorConfig,
    prefix: &Path,
    reference: &FileReference,
    uncompressed: Option<&FileReference>,
    by_hash: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let url = get_dist_url(&config.repository, &reference.path);
    let path = get_dist_path(&config.repository, prefix, &reference.path);

    // Fast path: both variants already pooled - re-link instead of re-fetching.
    if let Some(uncompressed) = uncompressed {
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);

        if config.pool.contains(&reference.checksums)
            && config.pool.contains(&uncompressed.checksums)
        {
            let data = config
                .pool
                .get_contents(&uncompressed.checksums, config.verify)?;

            if dry_run {
                return Ok(FetchResult { data, fetched: 0 });
            }
            // Ensure they're linked at current path
            config.pool.lock()?.link_file(&reference.checksums, &path)?;
            config
                .pool
                .lock()?
                .link_file(&uncompressed.checksums, &uncompressed_path)?;
            return Ok(FetchResult { data, fetched: 0 });
        }
    }

    // Build candidate URL list: prefer immutable by-hash URLs (SHA512 then
    // SHA256) when the release advertises them, falling back to the plain path.
    let urls = if by_hash {
        let mut urls = Vec::new();
        if let Some((base_url, _file_name)) = url.rsplit_once('/') {
            if let Some(sha512) = reference.checksums.sha512 {
                urls.push(format!("{base_url}/by-hash/SHA512/{}", hex::encode(sha512)));
            }
            if let Some(sha256) = reference.checksums.sha256 {
                urls.push(format!("{base_url}/by-hash/SHA256/{}", hex::encode(sha256)));
            }
        }
        urls.push(url);
        urls
    } else {
        vec![url]
    };

    // Try candidates in order: keep the first success, otherwise the last error.
    let res = urls
        .iter()
        .fold(None, |res, url| match res {
            Some(Ok(res)) => Some(Ok(res)),
            _ => Some(fetch_plain_file(
                config,
                url,
                &path,
                reference.size,
                &reference.checksums,
                true,
                dry_run,
            )),
        })
        .ok_or_else(|| format_err!("Failed to retrieve {}", reference.path))??;

    let mut buf = Vec::new();
    let raw = res.data_ref();

    // Decompress according to the reference's declared compression type.
    let decompressed = match reference.file_type.compression() {
        None => raw,
        Some(CompressionType::Gzip) => {
            let mut gz = GzDecoder::new(raw);
            gz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Bzip2) => {
            let mut bz = bzip2::read::BzDecoder::new(raw);
            bz.read_to_end(&mut buf)?;
            &buf[..]
        }
        Some(CompressionType::Lzma) | Some(CompressionType::Xz) => {
            // multi-decoder handles concatenated xz streams
            let mut xz = xz2::read::XzDecoder::new_multi_decoder(raw);
            xz.read_to_end(&mut buf)?;
            &buf[..]
        }
    };
    let res = FetchResult {
        data: decompressed.to_owned(),
        fetched: res.fetched,
    };

    if dry_run {
        return Ok(res);
    }

    // Persist the uncompressed variant too - it may not exist upstream as a file.
    let locked = &config.pool.lock()?;
    if let Some(uncompressed) = uncompressed {
        if !locked.contains(&uncompressed.checksums) {
            locked.add_file(decompressed, &uncompressed.checksums, config.sync)?;
        }

        // Ensure it's linked at current path
        let uncompressed_path = get_dist_path(&config.repository, prefix, &uncompressed.path);
        locked.link_file(&uncompressed.checksums, &uncompressed_path)?;
    }

    Ok(res)
}
353 | ||
/// Helper to fetch arbitrary files like binary packages.
///
/// Will skip fetching if matching file already exists locally, in which case it will just be re-linked under the new path.
///
/// If need_data is false and the mirror config is set to skip verification, reading the file's content will be skipped as well if fetching was skipped.
fn fetch_plain_file(
    config: &ParsedMirrorConfig,
    url: &str,
    file: &Path,
    max_size: usize,
    checksums: &CheckSums,
    need_data: bool,
    dry_run: bool,
) -> Result<FetchResult, Error> {
    let locked = &config.pool.lock()?;
    let res = if locked.contains(checksums) {
        // Already pooled: read contents back only if the caller needs them
        // or verification was requested.
        if need_data || config.verify {
            locked
                .get_contents(checksums, config.verify)
                .map(|data| FetchResult { data, fetched: 0 })?
        } else {
            // performance optimization for .deb files if verify is false
            // we never need the file contents and they make up the bulk of a repo
            FetchResult {
                data: vec![],
                fetched: 0,
            }
        }
    } else if dry_run && !need_data {
        // Dry-run: report without touching the network.
        FetchResult {
            data: vec![],
            fetched: 0,
        }
    } else {
        // Actually fetch (verifying against expected checksums) and pool it.
        let fetched = fetch_repo_file(
            &config.client,
            url,
            max_size,
            Some(checksums),
            config.auth.as_deref(),
        )?;
        locked.add_file(fetched.data_ref(), checksums, config.verify)?;
        fetched
    };

    if !dry_run {
        // Ensure it's linked at current path
        locked.link_file(checksums, file)?;
    }

    Ok(res)
}
406 | ||
2d13dcfc | 407 | /// Initialize a new mirror (by creating the corresponding pool). |
d035ecb5 | 408 | pub fn init(config: &MirrorConfig) -> Result<(), Error> { |
c598cb15 FG |
409 | let pool_dir = format!("{}/.pool", config.base_dir); |
410 | ||
411 | let dir = format!("{}/{}", config.base_dir, config.id); | |
412 | ||
413 | Pool::create(Path::new(&dir), Path::new(&pool_dir))?; | |
d035ecb5 FG |
414 | Ok(()) |
415 | } | |
416 | ||
c598cb15 | 417 | /// Destroy a mirror (by destroying the corresponding pool's link dir followed by GC). |
d035ecb5 FG |
418 | pub fn destroy(config: &MirrorConfig) -> Result<(), Error> { |
419 | let pool: Pool = pool(config)?; | |
420 | pool.lock()?.destroy()?; | |
421 | ||
422 | Ok(()) | |
423 | } | |
424 | ||
/// List snapshots
pub fn list_snapshots(config: &MirrorConfig) -> Result<Vec<Snapshot>, Error> {
    // Opening the pool validates that the mirror exists; the handle itself
    // is not otherwise needed.
    let _pool: Pool = pool(config)?;

    let mut list: Vec<Snapshot> = vec![];

    let dir = mirror_dir(config);

    let path = Path::new(&dir);

    // Scan the mirror dir for entries matching the snapshot naming pattern;
    // only directories are considered snapshots.
    proxmox_sys::fs::scandir(
        libc::AT_FDCWD,
        path,
        &SNAPSHOT_REGEX,
        |_l2_fd, snapshot, file_type| {
            if file_type != nix::dir::Type::Directory {
                return Ok(());
            }

            list.push(snapshot.parse()?);

            Ok(())
        },
    )?;

    list.sort_unstable();

    Ok(list)
}
454 | ||
/// Create a new snapshot of the remote repository, fetching and storing files as needed.
///
/// Operates in three phases:
/// - Fetch and verify release files
/// - Fetch referenced indices according to config
/// - Fetch binary packages referenced by package indices
///
/// Files will be linked in a temporary directory and only renamed to the final, valid snapshot directory at the end. In case of error, leftover `XXX.tmp` directories at the top level of `base_dir` can be safely removed once the next snapshot was successfully created, as they only contain hardlinks.
pub fn create_snapshot(
    config: MirrorConfig,
    snapshot: &Snapshot,
    subscription: Option<SubscriptionKey>,
    dry_run: bool,
) -> Result<(), Error> {
    // Derive an HTTP basic auth header from the subscription key if this
    // repository requires one; key and repo product types must match.
    let auth = if let Some(product) = &config.use_subscription {
        match subscription {
            None => {
                bail!(
                    "Mirror {} requires a subscription key, but none given.",
                    config.id
                );
            }
            Some(key) if key.product() == *product => {
                let base64 = base64::encode(format!("{}:{}", key.key, key.server_id));
                Some(format!("basic {base64}"))
            }
            Some(key) => {
                bail!(
                    "Repository product type '{}' and key product type '{}' don't match.",
                    product,
                    key.product()
                );
            }
        }
    } else {
        None
    };

    let mut config: ParsedMirrorConfig = config.try_into()?;
    config.auth = auth;

    // All files are linked under `<snapshot>.tmp` and renamed at the end.
    let prefix = format!("{snapshot}.tmp");
    let prefix = Path::new(&prefix);

    let mut total_progress = Progress::new();
    let mut warnings = Vec::new();

    // Parse a fetched release file and print its reference count.
    let parse_release = |res: FetchResult, name: &str| -> Result<ReleaseFile, Error> {
        println!("Parsing {name}..");
        let parsed: ReleaseFile = res.data[..].try_into()?;
        println!(
            "'{name}' file has {} referenced files..",
            parsed.files.len()
        );
        Ok(parsed)
    };

    // we want both on-disk for compat reasons
    let res = fetch_release(&config, prefix, true, dry_run)?;
    total_progress.update(&res);
    let _release = parse_release(res, "Release")?;

    let res = fetch_release(&config, prefix, false, dry_run)?;
    total_progress.update(&res);
    let release = parse_release(res, "InRelease")?;

    // Phase 2 prep: decide which referenced files to mirror based on the
    // configured components, types (deb/deb-src) and architectures.
    let mut per_component = HashMap::new();
    let mut others = Vec::new();
    let binary = &config
        .repository
        .types
        .contains(&APTRepositoryPackageType::Deb);
    let source = &config
        .repository
        .types
        .contains(&APTRepositoryPackageType::DebSrc);

    for (basename, references) in &release.files {
        let reference = references.first();
        let reference = if let Some(reference) = reference {
            reference.clone()
        } else {
            continue;
        };
        let skip_components = !&config.repository.components.contains(&reference.component);

        let skip = skip_components
            || match &reference.file_type {
                FileReferenceType::Ignored => true,
                FileReferenceType::PDiff => true, // would require fetching the patches as well
                FileReferenceType::Sources(_) => !source,
                _ => {
                    if let Some(arch) = reference.file_type.architecture() {
                        !binary || !config.architectures.contains(arch)
                    } else {
                        false
                    }
                }
            };
        if skip {
            println!("Skipping {}", reference.path);
            others.push(reference);
        } else {
            let list = per_component
                .entry(reference.component)
                .or_insert_with(Vec::new);
            list.push(basename);
        }
    }
    println!();

    // Summarize the selected index files per component.
    let mut indices_size = 0_usize;
    let mut total_count = 0;

    for (component, references) in &per_component {
        println!("Component '{component}'");

        let mut component_indices_size = 0;

        for basename in references {
            for reference in release.files.get(*basename).unwrap() {
                println!("\t{:?}: {:?}", reference.path, reference.file_type);
                component_indices_size += reference.size;
            }
        }
        indices_size += component_indices_size;

        let component_count = references.len();
        total_count += component_count;

        println!("Component references count: {component_count}");
        println!("Component indices size: {component_indices_size}");
        if references.is_empty() {
            println!("\tNo references found..");
        }
    }
    println!("Total indices count: {total_count}");
    println!("Total indices size: {indices_size}");

    if !others.is_empty() {
        println!("Skipped {} references", others.len());
    }
    println!();

    // Phase 2: fetch selected index files; collect package indices for phase 3.
    let mut packages_size = 0_usize;
    let mut packages_indices = HashMap::new();
    let mut failed_references = Vec::new();
    for (component, references) in per_component {
        println!("\nFetching indices for component '{component}'");
        let mut component_deb_size = 0;
        let mut fetch_progress = Progress::new();

        for basename in references {
            println!("\tFetching '{basename}'..");
            let files = release.files.get(basename).unwrap();
            let uncompressed_ref = files.iter().find(|reference| reference.path == *basename);

            let mut package_index_data = None;

            for reference in files {
                // if both compressed and uncompressed are referenced, the uncompressed file may not exist on the server
                if Some(reference) == uncompressed_ref && files.len() > 1 {
                    continue;
                }

                // this will ensure the uncompressed file will be written locally
                let res = match fetch_index_file(
                    &config,
                    prefix,
                    reference,
                    uncompressed_ref,
                    release.aquire_by_hash,
                    dry_run,
                ) {
                    Ok(res) => res,
                    // non-package-index references are best-effort: warn and continue
                    Err(err) if !reference.file_type.is_package_index() => {
                        let msg = format!(
                            "Failed to fetch '{:?}' type reference '{}', skipping - {err}",
                            reference.file_type, reference.path
                        );
                        eprintln!("{msg}");
                        warnings.push(msg);
                        failed_references.push(reference);
                        continue;
                    }
                    // failing to fetch a package index is fatal
                    Err(err) => return Err(err),
                };
                fetch_progress.update(&res);

                if package_index_data.is_none() && reference.file_type.is_package_index() {
                    package_index_data = Some(res.data());
                }
            }
            if let Some(data) = package_index_data {
                let packages: PackagesFile = data[..].try_into()?;
                let size: usize = packages.files.iter().map(|p| p.size).sum();
                println!("\t{} packages totalling {size}", packages.files.len());
                component_deb_size += size;

                packages_indices.entry(basename).or_insert(packages);
            }
            println!("Progress: {fetch_progress}");
        }
        println!("Total deb size for component: {component_deb_size}");
        packages_size += component_deb_size;
        total_progress += fetch_progress;
    }
    println!("Total deb size: {packages_size}");
    if !failed_references.is_empty() {
        eprintln!("Failed to download non-package-index references:");
        for reference in failed_references {
            eprintln!("\t{}", reference.path);
        }
    }

    // Compile the glob set for packages configured to be skipped.
    let skipped_package_globs = if let Some(skipped_packages) = &config.skip.skip_packages {
        let mut globs = GlobSetBuilder::new();
        for glob in skipped_packages {
            let glob = Glob::new(glob)?;
            globs.add(glob);
        }
        let globs = globs.build()?;
        Some(globs)
    } else {
        None
    };

    // Phase 3: fetch the actual packages referenced by the package indices.
    println!("\nFetching packages..");
    let mut dry_run_progress = Progress::new();
    let mut total_skipped_count = 0usize;
    let mut total_skipped_bytes = 0usize;
    for (basename, references) in packages_indices {
        let total_files = references.files.len();
        if total_files == 0 {
            println!("\n{basename} - no files, skipping.");
            continue;
        } else {
            println!("\n{basename} - {total_files} total file(s)");
        }

        let mut fetch_progress = Progress::new();
        let mut skipped_count = 0usize;
        let mut skipped_bytes = 0usize;
        for package in references.files {
            // Skip packages whose section is configured to be skipped.
            if let Some(ref sections) = &config.skip.skip_sections {
                if sections.iter().any(|section| package.section == *section) {
                    println!(
                        "\tskipping {} - {}b (section '{}')",
                        package.package, package.size, package.section
                    );
                    skipped_count += 1;
                    skipped_bytes += package.size;
                    continue;
                }
            }
            // Skip packages matching a configured package name glob.
            if let Some(skipped_package_globs) = &skipped_package_globs {
                let matches = skipped_package_globs.matches(&package.package);
                if !matches.is_empty() {
                    // safety, skipped_package_globs is set based on this
                    let globs = config.skip.skip_packages.as_ref().unwrap();
                    let matches: Vec<String> = matches.iter().map(|i| globs[*i].clone()).collect();
                    println!(
                        "\tskipping {} - {}b (package glob(s): {})",
                        package.package,
                        package.size,
                        matches.join(", ")
                    );
                    skipped_count += 1;
                    skipped_bytes += package.size;
                    continue;
                }
            }
            let url = get_repo_url(&config.repository, &package.file);

            if dry_run {
                // Dry-run: count missing packages as "would fetch" only.
                if config.pool.contains(&package.checksums) {
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: 0,
                    });
                } else {
                    println!("\t(dry-run) GET missing '{url}' ({}b)", package.size);
                    fetch_progress.update(&FetchResult {
                        data: vec![],
                        fetched: package.size,
                    });
                }
            } else {
                let mut full_path = PathBuf::from(prefix);
                full_path.push(&package.file);

                match fetch_plain_file(
                    &config,
                    &url,
                    &full_path,
                    package.size,
                    &package.checksums,
                    false,
                    dry_run,
                ) {
                    Ok(res) => fetch_progress.update(&res),
                    // with ignore_errors, a failed package download is only a warning
                    Err(err) if config.ignore_errors => {
                        let msg = format!(
                            "{}: failed to fetch package '{}' - {}",
                            basename, package.file, err,
                        );
                        eprintln!("{msg}");
                        warnings.push(msg);
                    }
                    Err(err) => return Err(err),
                }
            }

            // Print progress roughly once per percent of total files.
            if fetch_progress.file_count() % (max(total_files / 100, 1)) == 0 {
                println!("\tProgress: {fetch_progress}");
            }
        }
        println!("\tProgress: {fetch_progress}");
        if dry_run {
            dry_run_progress += fetch_progress;
        } else {
            total_progress += fetch_progress;
        }
        if skipped_count > 0 {
            total_skipped_count += skipped_count;
            total_skipped_bytes += skipped_bytes;
            println!("Skipped downloading {skipped_count} packages totalling {skipped_bytes}b");
        }
    }

    if dry_run {
        println!("\nDry-run Stats (indices, downloaded but not persisted):\n{total_progress}");
        println!("\nDry-run stats (packages, new == missing):\n{dry_run_progress}");
    } else {
        println!("\nStats: {total_progress}");
    }
    // NOTE(review): this summary is gated on `total_count` (number of index
    // references) rather than `total_skipped_count` — looks like it may have
    // been intended as `total_skipped_count > 0`; verify against upstream.
    if total_count > 0 {
        println!(
            "Skipped downloading {total_skipped_count} packages totalling {total_skipped_bytes}b"
        );
    }

    if !warnings.is_empty() {
        eprintln!("Warnings:");
        for msg in warnings {
            eprintln!("- {msg}");
        }
    }

    // Finalize: rename the temp dir to the real snapshot name.
    if !dry_run {
        println!("\nRotating temp. snapshot in-place: {prefix:?} -> \"{snapshot}\"");
        let locked = config.pool.lock()?;
        locked.rename(prefix, Path::new(&format!("{snapshot}")))?;
    }

    Ok(())
}
d035ecb5 | 812 | |
2d13dcfc | 813 | /// Remove a snapshot by removing the corresponding snapshot directory. To actually free up space, a garbage collection needs to be run afterwards. |
d035ecb5 FG |
814 | pub fn remove_snapshot(config: &MirrorConfig, snapshot: &Snapshot) -> Result<(), Error> { |
815 | let pool: Pool = pool(config)?; | |
816 | let path = pool.get_path(Path::new(&snapshot.to_string()))?; | |
817 | ||
818 | pool.lock()?.remove_dir(&path) | |
819 | } | |
820 | ||
2d13dcfc | 821 | /// Run a garbage collection on the underlying pool. |
d035ecb5 FG |
822 | pub fn gc(config: &MirrorConfig) -> Result<(usize, u64), Error> { |
823 | let pool: Pool = pool(config)?; | |
824 | ||
825 | pool.lock()?.gc() | |
826 | } | |
529111dc FG |
827 | |
828 | /// Print differences between two snapshots | |
829 | pub fn diff_snapshots( | |
830 | config: &MirrorConfig, | |
831 | snapshot: &Snapshot, | |
832 | other_snapshot: &Snapshot, | |
833 | ) -> Result<Diff, Error> { | |
834 | let pool = pool(config)?; | |
835 | pool.lock()?.diff_dirs( | |
836 | Path::new(&format!("{snapshot}")), | |
837 | Path::new(&format!("{other_snapshot}")), | |
838 | ) | |
839 | } |