]>
Commit | Line | Data |
---|---|---|
0a29b90c FG |
1 | use std::cell::{Cell, Ref, RefCell, RefMut}; |
2 | use std::cmp::Ordering; | |
3 | use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; | |
4 | use std::fmt; | |
5 | use std::hash; | |
6 | use std::mem; | |
7 | use std::path::{Path, PathBuf}; | |
8 | use std::rc::Rc; | |
9 | use std::time::{Duration, Instant}; | |
10 | ||
11 | use anyhow::Context; | |
12 | use bytesize::ByteSize; | |
fe692bf9 | 13 | use curl::easy::Easy; |
0a29b90c FG |
14 | use curl::multi::{EasyHandle, Multi}; |
15 | use lazycell::LazyCell; | |
0a29b90c FG |
16 | use semver::Version; |
17 | use serde::Serialize; | |
add651ee | 18 | use tracing::debug; |
0a29b90c FG |
19 | |
20 | use crate::core::compiler::{CompileKind, RustcTargetData}; | |
21 | use crate::core::dependency::DepKind; | |
22 | use crate::core::resolver::features::ForceAllTargets; | |
23 | use crate::core::resolver::{HasDevUnits, Resolve}; | |
0a29b90c | 24 | use crate::core::{Dependency, Manifest, PackageId, SourceId, Target}; |
781aab86 FG |
25 | use crate::core::{Summary, Workspace}; |
26 | use crate::sources::source::{MaybePackage, SourceMap}; | |
ed00b5ec | 27 | use crate::util::cache_lock::{CacheLock, CacheLockMode}; |
add651ee | 28 | use crate::util::errors::{CargoResult, HttpNotSuccessful}; |
0a29b90c | 29 | use crate::util::interning::InternedString; |
fe692bf9 FG |
30 | use crate::util::network::http::http_handle_and_timeout; |
31 | use crate::util::network::http::HttpTimeout; | |
0a29b90c FG |
32 | use crate::util::network::retry::{Retry, RetryResult}; |
33 | use crate::util::network::sleep::SleepTracker; | |
4b012472 | 34 | use crate::util::toml::prepare_for_publish; |
0a29b90c | 35 | use crate::util::{self, internal, Config, Progress, ProgressStyle}; |
4b012472 | 36 | use crate::util_schemas::manifest::RustVersion; |
0a29b90c FG |
37 | |
/// Comment block prepended to the normalized `Cargo.toml` emitted for
/// publishing (see `Package::to_registry_toml`), explaining to readers that
/// the file is generated and where to find the original.
pub const MANIFEST_PREAMBLE: &str = "\
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# \"normalize\" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
";
50 | ||
/// Information about a package that is available somewhere in the file system.
///
/// A package is a `Cargo.toml` file plus all the files that are part of it.
#[derive(Clone)]
pub struct Package {
    /// Shared payload; the `Rc` makes `Package` clones cheap (refcount bump).
    inner: Rc<PackageInner>,
}
58 | ||
/// The shared state behind a `Package` (see `Package::inner`).
#[derive(Clone)]
// TODO: is `manifest_path` a relic?
struct PackageInner {
    /// The package's manifest.
    manifest: Manifest,
    /// The root of the package.
    manifest_path: PathBuf,
}
67 | ||
68 | impl Ord for Package { | |
69 | fn cmp(&self, other: &Package) -> Ordering { | |
70 | self.package_id().cmp(&other.package_id()) | |
71 | } | |
72 | } | |
73 | ||
74 | impl PartialOrd for Package { | |
75 | fn partial_cmp(&self, other: &Package) -> Option<Ordering> { | |
76 | Some(self.cmp(other)) | |
77 | } | |
78 | } | |
79 | ||
/// A Package in a form where `Serialize` can be derived.
///
/// Field order matters: serde serializes fields in declaration order, so the
/// JSON output format follows this layout.
#[derive(Serialize)]
pub struct SerializedPackage {
    name: InternedString,
    version: Version,
    id: PackageId,
    license: Option<String>,
    license_file: Option<String>,
    description: Option<String>,
    source: SourceId,
    dependencies: Vec<Dependency>,
    targets: Vec<Target>,
    features: BTreeMap<InternedString, Vec<InternedString>>,
    manifest_path: PathBuf,
    /// Populated from `Manifest::custom_metadata` (see `Package::serialized`).
    metadata: Option<toml::Value>,
    publish: Option<Vec<String>>,
    authors: Vec<String>,
    categories: Vec<String>,
    keywords: Vec<String>,
    readme: Option<String>,
    repository: Option<String>,
    homepage: Option<String>,
    documentation: Option<String>,
    edition: String,
    links: Option<String>,
    /// Omitted from the serialized output entirely when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    metabuild: Option<Vec<String>>,
    default_run: Option<String>,
    rust_version: Option<RustVersion>,
}
110 | ||
111 | impl Package { | |
112 | /// Creates a package from a manifest and its location. | |
113 | pub fn new(manifest: Manifest, manifest_path: &Path) -> Package { | |
114 | Package { | |
115 | inner: Rc::new(PackageInner { | |
116 | manifest, | |
117 | manifest_path: manifest_path.to_path_buf(), | |
118 | }), | |
119 | } | |
120 | } | |
121 | ||
122 | /// Gets the manifest dependencies. | |
123 | pub fn dependencies(&self) -> &[Dependency] { | |
124 | self.manifest().dependencies() | |
125 | } | |
126 | /// Gets the manifest. | |
127 | pub fn manifest(&self) -> &Manifest { | |
128 | &self.inner.manifest | |
129 | } | |
130 | /// Gets the manifest. | |
131 | pub fn manifest_mut(&mut self) -> &mut Manifest { | |
132 | &mut Rc::make_mut(&mut self.inner).manifest | |
133 | } | |
134 | /// Gets the path to the manifest. | |
135 | pub fn manifest_path(&self) -> &Path { | |
136 | &self.inner.manifest_path | |
137 | } | |
138 | /// Gets the name of the package. | |
139 | pub fn name(&self) -> InternedString { | |
140 | self.package_id().name() | |
141 | } | |
142 | /// Gets the `PackageId` object for the package (fully defines a package). | |
143 | pub fn package_id(&self) -> PackageId { | |
144 | self.manifest().package_id() | |
145 | } | |
146 | /// Gets the root folder of the package. | |
147 | pub fn root(&self) -> &Path { | |
148 | self.manifest_path().parent().unwrap() | |
149 | } | |
150 | /// Gets the summary for the package. | |
151 | pub fn summary(&self) -> &Summary { | |
152 | self.manifest().summary() | |
153 | } | |
154 | /// Gets the targets specified in the manifest. | |
155 | pub fn targets(&self) -> &[Target] { | |
156 | self.manifest().targets() | |
157 | } | |
158 | /// Gets the library crate for this package, if it exists. | |
159 | pub fn library(&self) -> Option<&Target> { | |
160 | self.targets().iter().find(|t| t.is_lib()) | |
161 | } | |
162 | /// Gets the current package version. | |
163 | pub fn version(&self) -> &Version { | |
164 | self.package_id().version() | |
165 | } | |
166 | /// Gets the package authors. | |
167 | pub fn authors(&self) -> &Vec<String> { | |
168 | &self.manifest().metadata().authors | |
169 | } | |
170 | ||
171 | /// Returns `None` if the package is set to publish. | |
172 | /// Returns `Some(allowed_registries)` if publishing is limited to specified | |
173 | /// registries or if package is set to not publish. | |
174 | pub fn publish(&self) -> &Option<Vec<String>> { | |
175 | self.manifest().publish() | |
176 | } | |
177 | /// Returns `true` if this package is a proc-macro. | |
178 | pub fn proc_macro(&self) -> bool { | |
179 | self.targets().iter().any(|target| target.proc_macro()) | |
180 | } | |
181 | /// Gets the package's minimum Rust version. | |
781aab86 | 182 | pub fn rust_version(&self) -> Option<&RustVersion> { |
0a29b90c FG |
183 | self.manifest().rust_version() |
184 | } | |
185 | ||
186 | /// Returns `true` if the package uses a custom build script for any target. | |
187 | pub fn has_custom_build(&self) -> bool { | |
188 | self.targets().iter().any(|t| t.is_custom_build()) | |
189 | } | |
190 | ||
191 | pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Package { | |
192 | Package { | |
193 | inner: Rc::new(PackageInner { | |
194 | manifest: self.manifest().clone().map_source(to_replace, replace_with), | |
195 | manifest_path: self.manifest_path().to_owned(), | |
196 | }), | |
197 | } | |
198 | } | |
199 | ||
200 | pub fn to_registry_toml(&self, ws: &Workspace<'_>) -> CargoResult<String> { | |
4b012472 | 201 | let manifest = prepare_for_publish(self.manifest().original(), ws, self.root())?; |
0a29b90c FG |
202 | let toml = toml::to_string_pretty(&manifest)?; |
203 | Ok(format!("{}\n{}", MANIFEST_PREAMBLE, toml)) | |
204 | } | |
205 | ||
206 | /// Returns if package should include `Cargo.lock`. | |
207 | pub fn include_lockfile(&self) -> bool { | |
208 | self.targets().iter().any(|t| t.is_example() || t.is_bin()) | |
209 | } | |
210 | ||
211 | pub fn serialized(&self) -> SerializedPackage { | |
212 | let summary = self.manifest().summary(); | |
213 | let package_id = summary.package_id(); | |
214 | let manmeta = self.manifest().metadata(); | |
215 | // Filter out metabuild targets. They are an internal implementation | |
216 | // detail that is probably not relevant externally. There's also not a | |
217 | // real path to show in `src_path`, and this avoids changing the format. | |
218 | let targets: Vec<Target> = self | |
219 | .manifest() | |
220 | .targets() | |
221 | .iter() | |
222 | .filter(|t| t.src_path().is_path()) | |
223 | .cloned() | |
224 | .collect(); | |
225 | // Convert Vec<FeatureValue> to Vec<InternedString> | |
226 | let features = summary | |
227 | .features() | |
228 | .iter() | |
229 | .map(|(k, v)| { | |
230 | ( | |
231 | *k, | |
232 | v.iter() | |
233 | .map(|fv| InternedString::new(&fv.to_string())) | |
234 | .collect(), | |
235 | ) | |
236 | }) | |
237 | .collect(); | |
238 | ||
239 | SerializedPackage { | |
240 | name: package_id.name(), | |
241 | version: package_id.version().clone(), | |
242 | id: package_id, | |
243 | license: manmeta.license.clone(), | |
244 | license_file: manmeta.license_file.clone(), | |
245 | description: manmeta.description.clone(), | |
246 | source: summary.source_id(), | |
247 | dependencies: summary.dependencies().to_vec(), | |
248 | targets, | |
249 | features, | |
250 | manifest_path: self.manifest_path().to_path_buf(), | |
251 | metadata: self.manifest().custom_metadata().cloned(), | |
252 | authors: manmeta.authors.clone(), | |
253 | categories: manmeta.categories.clone(), | |
254 | keywords: manmeta.keywords.clone(), | |
255 | readme: manmeta.readme.clone(), | |
256 | repository: manmeta.repository.clone(), | |
257 | homepage: manmeta.homepage.clone(), | |
258 | documentation: manmeta.documentation.clone(), | |
259 | edition: self.manifest().edition().to_string(), | |
260 | links: self.manifest().links().map(|s| s.to_owned()), | |
261 | metabuild: self.manifest().metabuild().cloned(), | |
262 | publish: self.publish().as_ref().cloned(), | |
263 | default_run: self.manifest().default_run().map(|s| s.to_owned()), | |
781aab86 | 264 | rust_version: self.rust_version().cloned(), |
0a29b90c FG |
265 | } |
266 | } | |
267 | } | |
268 | ||
269 | impl fmt::Display for Package { | |
270 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
271 | write!(f, "{}", self.summary().package_id()) | |
272 | } | |
273 | } | |
274 | ||
275 | impl fmt::Debug for Package { | |
276 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { | |
277 | f.debug_struct("Package") | |
278 | .field("id", &self.summary().package_id()) | |
279 | .field("..", &"..") | |
280 | .finish() | |
281 | } | |
282 | } | |
283 | ||
284 | impl PartialEq for Package { | |
285 | fn eq(&self, other: &Package) -> bool { | |
286 | self.package_id() == other.package_id() | |
287 | } | |
288 | } | |
289 | ||
290 | impl Eq for Package {} | |
291 | ||
292 | impl hash::Hash for Package { | |
293 | fn hash<H: hash::Hasher>(&self, into: &mut H) { | |
294 | self.package_id().hash(into) | |
295 | } | |
296 | } | |
297 | ||
/// A set of packages, with the intent to download.
///
/// This is primarily used to convert a set of `PackageId`s to `Package`s. It
/// will download as needed, or used the cached download if available.
pub struct PackageSet<'cfg> {
    /// One slot per package id; each slot is filled lazily once the
    /// corresponding package is available locally.
    packages: HashMap<PackageId, LazyCell<Package>>,
    /// The sources that packages are fetched from.
    sources: RefCell<SourceMap<'cfg>>,
    /// Global cargo configuration.
    config: &'cfg Config,
    /// libcurl multi handle used for downloads.
    multi: Multi,
    /// Used to prevent reusing the PackageSet to download twice.
    downloading: Cell<bool>,
    /// Whether or not to use curl HTTP/2 multiplexing.
    multiplexing: bool,
}
312 | ||
/// Helper for downloading crates.
pub struct Downloads<'a, 'cfg> {
    /// The package set whose packages are being downloaded.
    set: &'a PackageSet<'cfg>,
    /// When a download is started, it is added to this map. The key is a
    /// "token" (see `Download::token`). It is removed once the download is
    /// finished.
    pending: HashMap<usize, (Download<'cfg>, EasyHandle)>,
    /// Set of packages currently being downloaded. This should stay in sync
    /// with `pending`.
    pending_ids: HashSet<PackageId>,
    /// Downloads that have failed and are waiting to retry again later.
    sleeping: SleepTracker<(Download<'cfg>, Easy)>,
    /// The final result of each download. A pair `(token, result)`. This is a
    /// temporary holding area, needed because curl can report multiple
    /// downloads at once, but the main loop (`wait`) is written to only
    /// handle one at a time.
    results: Vec<(usize, Result<(), curl::Error>)>,
    /// The next ID to use for creating a token (see `Download::token`).
    next: usize,
    /// Progress bar.
    progress: RefCell<Option<Progress<'cfg>>>,
    /// Number of downloads that have successfully finished.
    downloads_finished: usize,
    /// Total bytes for all successfully downloaded packages.
    downloaded_bytes: u64,
    /// Size (in bytes) and package name of the largest downloaded package.
    largest: (u64, InternedString),
    /// Time when downloading started.
    start: Instant,
    /// Indicates *all* downloads were successful.
    success: bool,

    /// Timeout management, both of timeout thresholds as well as whether or not
    /// our connection has timed out (and accompanying message if it has).
    ///
    /// Note that timeout management is done manually here instead of in libcurl
    /// because we want to apply timeouts to an entire batch of operations, not
    /// any one particular single operation.
    timeout: HttpTimeout,
    /// Last time bytes were received.
    updated_at: Cell<Instant>,
    /// This is a slow-speed check. It is reset to `now + timeout_duration`
    /// every time at least `threshold` bytes are received. If the current
    /// time ever exceeds `next_speed_check`, then give up and report a
    /// timeout error.
    next_speed_check: Cell<Instant>,
    /// This is the slow-speed threshold byte count. It starts at the
    /// configured threshold value (default 10), and is decremented by the
    /// number of bytes received in each chunk. If it is <= zero, the
    /// threshold has been met and data is being received fast enough not to
    /// trigger a timeout; reset `next_speed_check` and set this back to the
    /// configured threshold.
    next_speed_check_bytes_threshold: Cell<u64>,
    /// Global filesystem lock to ensure only one Cargo is downloading at a
    /// time.
    _lock: CacheLock<'cfg>,
}
370 | ||
/// State for a single in-flight crate download.
struct Download<'cfg> {
    /// The token for this download, used as the key of the `Downloads::pending` map
    /// and stored in `EasyHandle` as well.
    token: usize,

    /// The package that we're downloading.
    id: PackageId,

    /// Actual downloaded data, updated throughout the lifetime of this download.
    data: RefCell<Vec<u8>>,

    /// HTTP headers for debugging.
    headers: RefCell<Vec<String>>,

    /// The URL that we're downloading from, cached here for error messages and
    /// reenqueuing.
    url: String,

    /// A descriptive string to print when we've finished downloading this crate.
    descriptor: String,

    /// Statistics updated from the progress callback in libcurl.
    total: Cell<u64>,
    current: Cell<u64>,

    /// The moment we started this transfer at.
    start: Instant,
    /// Set to a descriptive message when the transfer times out (consumed by
    /// the retry logic in `Downloads::wait`).
    timed_out: Cell<Option<String>>,

    /// Logic used to track retrying this download if it's a spurious failure.
    retry: Retry<'cfg>,
}
403 | ||
404 | impl<'cfg> PackageSet<'cfg> { | |
405 | pub fn new( | |
406 | package_ids: &[PackageId], | |
407 | sources: SourceMap<'cfg>, | |
408 | config: &'cfg Config, | |
409 | ) -> CargoResult<PackageSet<'cfg>> { | |
410 | // We've enabled the `http2` feature of `curl` in Cargo, so treat | |
411 | // failures here as fatal as it would indicate a build-time problem. | |
412 | let mut multi = Multi::new(); | |
413 | let multiplexing = config.http_config()?.multiplexing.unwrap_or(true); | |
414 | multi | |
415 | .pipelining(false, multiplexing) | |
416 | .with_context(|| "failed to enable multiplexing/pipelining in curl")?; | |
417 | ||
418 | // let's not flood crates.io with connections | |
419 | multi.set_max_host_connections(2)?; | |
420 | ||
421 | Ok(PackageSet { | |
422 | packages: package_ids | |
423 | .iter() | |
424 | .map(|&id| (id, LazyCell::new())) | |
425 | .collect(), | |
426 | sources: RefCell::new(sources), | |
427 | config, | |
428 | multi, | |
429 | downloading: Cell::new(false), | |
430 | multiplexing, | |
431 | }) | |
432 | } | |
433 | ||
434 | pub fn package_ids(&self) -> impl Iterator<Item = PackageId> + '_ { | |
435 | self.packages.keys().cloned() | |
436 | } | |
437 | ||
438 | pub fn packages(&self) -> impl Iterator<Item = &Package> { | |
439 | self.packages.values().filter_map(|p| p.borrow()) | |
440 | } | |
441 | ||
442 | pub fn enable_download<'a>(&'a self) -> CargoResult<Downloads<'a, 'cfg>> { | |
443 | assert!(!self.downloading.replace(true)); | |
fe692bf9 | 444 | let timeout = HttpTimeout::new(self.config)?; |
0a29b90c FG |
445 | Ok(Downloads { |
446 | start: Instant::now(), | |
447 | set: self, | |
448 | next: 0, | |
449 | pending: HashMap::new(), | |
450 | pending_ids: HashSet::new(), | |
451 | sleeping: SleepTracker::new(), | |
452 | results: Vec::new(), | |
453 | progress: RefCell::new(Some(Progress::with_style( | |
454 | "Downloading", | |
455 | ProgressStyle::Ratio, | |
456 | self.config, | |
457 | ))), | |
458 | downloads_finished: 0, | |
459 | downloaded_bytes: 0, | |
781aab86 | 460 | largest: (0, InternedString::new("")), |
0a29b90c FG |
461 | success: false, |
462 | updated_at: Cell::new(Instant::now()), | |
463 | timeout, | |
464 | next_speed_check: Cell::new(Instant::now()), | |
465 | next_speed_check_bytes_threshold: Cell::new(0), | |
ed00b5ec FG |
466 | _lock: self |
467 | .config | |
468 | .acquire_package_cache_lock(CacheLockMode::DownloadExclusive)?, | |
0a29b90c FG |
469 | }) |
470 | } | |
471 | ||
472 | pub fn get_one(&self, id: PackageId) -> CargoResult<&Package> { | |
473 | if let Some(pkg) = self.packages.get(&id).and_then(|slot| slot.borrow()) { | |
474 | return Ok(pkg); | |
475 | } | |
476 | Ok(self.get_many(Some(id))?.remove(0)) | |
477 | } | |
478 | ||
479 | pub fn get_many(&self, ids: impl IntoIterator<Item = PackageId>) -> CargoResult<Vec<&Package>> { | |
480 | let mut pkgs = Vec::new(); | |
ed00b5ec FG |
481 | let _lock = self |
482 | .config | |
483 | .acquire_package_cache_lock(CacheLockMode::DownloadExclusive)?; | |
0a29b90c FG |
484 | let mut downloads = self.enable_download()?; |
485 | for id in ids { | |
486 | pkgs.extend(downloads.start(id)?); | |
487 | } | |
488 | while downloads.remaining() > 0 { | |
489 | pkgs.push(downloads.wait()?); | |
490 | } | |
491 | downloads.success = true; | |
4b012472 FG |
492 | drop(downloads); |
493 | ||
494 | let mut deferred = self.config.deferred_global_last_use()?; | |
495 | deferred.save_no_error(self.config); | |
0a29b90c FG |
496 | Ok(pkgs) |
497 | } | |
498 | ||
499 | /// Downloads any packages accessible from the give root ids. | |
500 | pub fn download_accessible( | |
501 | &self, | |
502 | resolve: &Resolve, | |
503 | root_ids: &[PackageId], | |
504 | has_dev_units: HasDevUnits, | |
505 | requested_kinds: &[CompileKind], | |
506 | target_data: &RustcTargetData<'cfg>, | |
507 | force_all_targets: ForceAllTargets, | |
508 | ) -> CargoResult<()> { | |
509 | fn collect_used_deps( | |
510 | used: &mut BTreeSet<PackageId>, | |
511 | resolve: &Resolve, | |
512 | pkg_id: PackageId, | |
513 | has_dev_units: HasDevUnits, | |
514 | requested_kinds: &[CompileKind], | |
515 | target_data: &RustcTargetData<'_>, | |
516 | force_all_targets: ForceAllTargets, | |
517 | ) -> CargoResult<()> { | |
518 | if !used.insert(pkg_id) { | |
519 | return Ok(()); | |
520 | } | |
521 | let filtered_deps = PackageSet::filter_deps( | |
522 | pkg_id, | |
523 | resolve, | |
524 | has_dev_units, | |
525 | requested_kinds, | |
526 | target_data, | |
527 | force_all_targets, | |
528 | ); | |
529 | for (pkg_id, _dep) in filtered_deps { | |
530 | collect_used_deps( | |
531 | used, | |
532 | resolve, | |
533 | pkg_id, | |
534 | has_dev_units, | |
535 | requested_kinds, | |
536 | target_data, | |
537 | force_all_targets, | |
538 | )?; | |
539 | } | |
540 | Ok(()) | |
541 | } | |
542 | ||
543 | // This is sorted by PackageId to get consistent behavior and error | |
544 | // messages for Cargo's testsuite. Perhaps there is a better ordering | |
545 | // that optimizes download time? | |
546 | let mut to_download = BTreeSet::new(); | |
547 | ||
548 | for id in root_ids { | |
549 | collect_used_deps( | |
550 | &mut to_download, | |
551 | resolve, | |
552 | *id, | |
553 | has_dev_units, | |
554 | requested_kinds, | |
555 | target_data, | |
556 | force_all_targets, | |
557 | )?; | |
558 | } | |
559 | self.get_many(to_download.into_iter())?; | |
560 | Ok(()) | |
561 | } | |
562 | ||
563 | /// Check if there are any dependency packages that violate artifact constraints | |
564 | /// to instantly abort, or that do not have any libs which results in warnings. | |
565 | pub(crate) fn warn_no_lib_packages_and_artifact_libs_overlapping_deps( | |
566 | &self, | |
567 | ws: &Workspace<'cfg>, | |
568 | resolve: &Resolve, | |
569 | root_ids: &[PackageId], | |
570 | has_dev_units: HasDevUnits, | |
571 | requested_kinds: &[CompileKind], | |
572 | target_data: &RustcTargetData<'_>, | |
573 | force_all_targets: ForceAllTargets, | |
574 | ) -> CargoResult<()> { | |
575 | let no_lib_pkgs: BTreeMap<PackageId, Vec<(&Package, &HashSet<Dependency>)>> = root_ids | |
576 | .iter() | |
577 | .map(|&root_id| { | |
578 | let dep_pkgs_to_deps: Vec<_> = PackageSet::filter_deps( | |
579 | root_id, | |
580 | resolve, | |
581 | has_dev_units, | |
582 | requested_kinds, | |
583 | target_data, | |
584 | force_all_targets, | |
585 | ) | |
586 | .collect(); | |
587 | ||
588 | let dep_pkgs_and_deps = dep_pkgs_to_deps | |
589 | .into_iter() | |
590 | .filter(|(_id, deps)| deps.iter().any(|dep| dep.maybe_lib())) | |
591 | .filter_map(|(dep_package_id, deps)| { | |
592 | self.get_one(dep_package_id).ok().and_then(|dep_pkg| { | |
593 | (!dep_pkg.targets().iter().any(|t| t.is_lib())).then(|| (dep_pkg, deps)) | |
594 | }) | |
595 | }) | |
596 | .collect(); | |
597 | (root_id, dep_pkgs_and_deps) | |
598 | }) | |
599 | .collect(); | |
600 | ||
601 | for (pkg_id, dep_pkgs) in no_lib_pkgs { | |
602 | for (_dep_pkg_without_lib_target, deps) in dep_pkgs { | |
603 | for dep in deps.iter().filter(|dep| { | |
604 | dep.artifact() | |
605 | .map(|artifact| artifact.is_lib()) | |
606 | .unwrap_or(true) | |
607 | }) { | |
608 | ws.config().shell().warn(&format!( | |
609 | "{} ignoring invalid dependency `{}` which is missing a lib target", | |
610 | pkg_id, | |
611 | dep.name_in_toml(), | |
612 | ))?; | |
613 | } | |
614 | } | |
615 | } | |
616 | Ok(()) | |
617 | } | |
618 | ||
619 | fn filter_deps<'a>( | |
620 | pkg_id: PackageId, | |
621 | resolve: &'a Resolve, | |
622 | has_dev_units: HasDevUnits, | |
623 | requested_kinds: &'a [CompileKind], | |
624 | target_data: &'a RustcTargetData<'_>, | |
625 | force_all_targets: ForceAllTargets, | |
626 | ) -> impl Iterator<Item = (PackageId, &'a HashSet<Dependency>)> + 'a { | |
627 | resolve | |
628 | .deps(pkg_id) | |
629 | .filter(move |&(_id, deps)| { | |
630 | deps.iter().any(|dep| { | |
631 | if dep.kind() == DepKind::Development && has_dev_units == HasDevUnits::No { | |
632 | return false; | |
633 | } | |
634 | if force_all_targets == ForceAllTargets::No { | |
635 | let activated = requested_kinds | |
636 | .iter() | |
637 | .chain(Some(&CompileKind::Host)) | |
638 | .any(|kind| target_data.dep_platform_activated(dep, *kind)); | |
639 | if !activated { | |
640 | return false; | |
641 | } | |
642 | } | |
643 | true | |
644 | }) | |
645 | }) | |
646 | .into_iter() | |
647 | } | |
648 | ||
649 | pub fn sources(&self) -> Ref<'_, SourceMap<'cfg>> { | |
650 | self.sources.borrow() | |
651 | } | |
652 | ||
653 | pub fn sources_mut(&self) -> RefMut<'_, SourceMap<'cfg>> { | |
654 | self.sources.borrow_mut() | |
655 | } | |
656 | ||
657 | /// Merge the given set into self. | |
658 | pub fn add_set(&mut self, set: PackageSet<'cfg>) { | |
659 | assert!(!self.downloading.get()); | |
660 | assert!(!set.downloading.get()); | |
661 | for (pkg_id, p_cell) in set.packages { | |
662 | self.packages.entry(pkg_id).or_insert(p_cell); | |
663 | } | |
664 | let mut sources = self.sources.borrow_mut(); | |
665 | let other_sources = set.sources.into_inner(); | |
666 | sources.add_source_map(other_sources); | |
667 | } | |
668 | } | |
669 | ||
670 | impl<'a, 'cfg> Downloads<'a, 'cfg> { | |
671 | /// Starts to download the package for the `id` specified. | |
672 | /// | |
673 | /// Returns `None` if the package is queued up for download and will | |
674 | /// eventually be returned from `wait_for_download`. Returns `Some(pkg)` if | |
675 | /// the package is ready and doesn't need to be downloaded. | |
676 | pub fn start(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> { | |
677 | self.start_inner(id) | |
678 | .with_context(|| format!("failed to download `{}`", id)) | |
679 | } | |
680 | ||
    /// Actually begins the download for `id`; returns `Ok(Some(pkg))` when the
    /// source already has the package locally, `Ok(None)` when a transfer has
    /// been enqueued on the curl multi handle.
    fn start_inner(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> {
        // First up see if we've already cached this package, in which case
        // there's nothing to do.
        let slot = self
            .set
            .packages
            .get(&id)
            .ok_or_else(|| internal(format!("couldn't find `{}` in package set", id)))?;
        if let Some(pkg) = slot.borrow() {
            return Ok(Some(pkg));
        }

        // Ask the original source for this `PackageId` for the corresponding
        // package. That may immediately come back and tell us that the package
        // is ready, or it could tell us that it needs to be downloaded.
        let mut sources = self.set.sources.borrow_mut();
        let source = sources
            .get_mut(id.source_id())
            .ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?;
        let pkg = source
            .download(id)
            .with_context(|| "unable to get packages from source")?;
        let (url, descriptor, authorization) = match pkg {
            MaybePackage::Ready(pkg) => {
                debug!("{} doesn't need a download", id);
                assert!(slot.fill(pkg).is_ok());
                return Ok(Some(slot.borrow().unwrap()));
            }
            MaybePackage::Download {
                url,
                descriptor,
                authorization,
            } => (url, descriptor, authorization),
        };

        // Ok we're going to download this crate, so let's set up all our
        // internal state and hand off an `Easy` handle to our libcurl `Multi`
        // handle. This won't actually start the transfer, but later it'll
        // happen during `wait_for_download`
        let token = self.next;
        self.next += 1;
        debug!(target: "network", "downloading {} as {}", id, token);
        assert!(self.pending_ids.insert(id));

        let (mut handle, _timeout) = http_handle_and_timeout(self.set.config)?;
        handle.get(true)?;
        handle.url(&url)?;
        handle.follow_location(true)?; // follow redirects

        // Add authorization header.
        if let Some(authorization) = authorization {
            let mut headers = curl::easy::List::new();
            headers.append(&format!("Authorization: {}", authorization))?;
            handle.http_headers(headers)?;
        }

        // Enable HTTP/2 if possible.
        crate::try_old_curl_http2_pipewait!(self.set.multiplexing, handle);

        // Body bytes are appended to this download's buffer via a
        // thread-local (`tls`) lookup, since the closure can't borrow `self`.
        handle.write_function(move |buf| {
            debug!(target: "network", "{} - {} bytes of data", token, buf.len());
            tls::with(|downloads| {
                if let Some(downloads) = downloads {
                    downloads.pending[&token]
                        .0
                        .data
                        .borrow_mut()
                        .extend_from_slice(buf);
                }
            });
            Ok(buf.len())
        })?;
        handle.header_function(move |data| {
            tls::with(|downloads| {
                if let Some(downloads) = downloads {
                    // Headers contain trailing \r\n, trim them to make it easier
                    // to work with.
                    let h = String::from_utf8_lossy(data).trim().to_string();
                    downloads.pending[&token].0.headers.borrow_mut().push(h);
                }
            });
            true
        })?;

        // Progress callback; returning `false` from it aborts the transfer.
        handle.progress(true)?;
        handle.progress_function(move |dl_total, dl_cur, _, _| {
            tls::with(|downloads| match downloads {
                Some(d) => d.progress(token, dl_total as u64, dl_cur as u64),
                None => false,
            })
        })?;

        // If the progress bar isn't enabled then it may be awhile before the
        // first crate finishes downloading so we inform immediately that we're
        // downloading crates here.
        if self.downloads_finished == 0
            && self.pending.is_empty()
            && !self.progress.borrow().as_ref().unwrap().is_enabled()
        {
            self.set
                .config
                .shell()
                .status("Downloading", "crates ...")?;
        }

        let dl = Download {
            token,
            data: RefCell::new(Vec::new()),
            headers: RefCell::new(Vec::new()),
            id,
            url,
            descriptor,
            total: Cell::new(0),
            current: Cell::new(0),
            start: Instant::now(),
            timed_out: Cell::new(None),
            retry: Retry::new(self.set.config)?,
        };
        self.enqueue(dl, handle)?;
        self.tick(WhyTick::DownloadStarted)?;

        Ok(None)
    }
804 | ||
805 | /// Returns the number of crates that are still downloading. | |
806 | pub fn remaining(&self) -> usize { | |
807 | self.pending.len() + self.sleeping.len() | |
808 | } | |
809 | ||
    /// Blocks the current thread waiting for a package to finish downloading.
    ///
    /// This method will wait for a previously enqueued package to finish
    /// downloading and return a reference to it after it's done downloading.
    ///
    /// # Panics
    ///
    /// This function will panic if there are no remaining downloads.
    pub fn wait(&mut self) -> CargoResult<&'a Package> {
        // Loop until some transfer finishes for good; spuriously-failed
        // transfers are re-parked for retry and we keep waiting.
        let (dl, data) = loop {
            // Invariant: every pending transfer has exactly one entry in
            // `pending_ids` and vice versa.
            assert_eq!(self.pending.len(), self.pending_ids.len());
            let (token, result) = self.wait_for_curl()?;
            debug!(target: "network", "{} finished with {:?}", token, result);

            let (mut dl, handle) = self
                .pending
                .remove(&token)
                .expect("got a token for a non-in-progress transfer");
            // Steal the buffered response body/headers out of the download's
            // RefCells, leaving empty buffers behind (important if this
            // download ends up being retried).
            let data = mem::take(&mut *dl.data.borrow_mut());
            let headers = mem::take(&mut *dl.headers.borrow_mut());
            // Detach the easy handle from the `Multi` so we can inspect it
            // (and possibly re-add it on retry).
            let mut handle = self.set.multi.remove(handle)?;
            self.pending_ids.remove(&dl.id);

            // Check if this was a spurious error. If it was a spurious error
            // then we want to re-enqueue our request for another attempt and
            // then we wait for another request to finish.
            let ret = {
                let timed_out = &dl.timed_out;
                let url = &dl.url;
                dl.retry.r#try(|| {
                    if let Err(e) = result {
                        // If this error is "aborted by callback" then that's
                        // probably because our progress callback aborted due to
                        // a timeout. We'll find out by looking at the
                        // `timed_out` field, looking for a descriptive message.
                        // If one is found we switch the error code (to ensure
                        // it's flagged as spurious) and then attach our extra
                        // information to the error.
                        if !e.is_aborted_by_callback() {
                            return Err(e.into());
                        }

                        return Err(match timed_out.replace(None) {
                            Some(msg) => {
                                let code = curl_sys::CURLE_OPERATION_TIMEDOUT;
                                let mut err = curl::Error::new(code);
                                err.set_extra(msg);
                                err
                            }
                            None => e,
                        }
                        .into());
                    }

                    // Code 0 shows up for non-HTTP protocols (e.g. file://
                    // style sources), so it's accepted alongside 200.
                    let code = handle.response_code()?;
                    if code != 200 && code != 0 {
                        return Err(HttpNotSuccessful::new_from_handle(
                            &mut handle,
                            &url,
                            data,
                            headers,
                        )
                        .into());
                    }
                    Ok(data)
                })
            };
            match ret {
                RetryResult::Success(data) => break (dl, data),
                RetryResult::Err(e) => {
                    return Err(e.context(format!("failed to download from `{}`", dl.url)))
                }
                RetryResult::Retry(sleep) => {
                    // Spurious failure: park the download; `add_sleepers`
                    // re-enqueues it once the backoff elapses.
                    debug!(target: "network", "download retry {} for {sleep}ms", dl.url);
                    self.sleeping.push(sleep, (dl, handle));
                }
            }
        };

        // If the progress bar isn't enabled then we still want to provide some
        // semblance of progress of how we're downloading crates, and if the
        // progress bar is enabled this provides a good log of what's happening.
        self.progress.borrow_mut().as_mut().unwrap().clear();
        self.set
            .config
            .shell()
            .status("Downloaded", &dl.descriptor)?;

        // Update the statistics reported by the final summary in `Drop`.
        self.downloads_finished += 1;
        self.downloaded_bytes += dl.total.get();
        if dl.total.get() > self.largest.0 {
            self.largest = (dl.total.get(), dl.id.name());
        }

        // We're about to synchronously extract the crate below. While we're
        // doing that our download progress won't actually be updated, nor do we
        // have a great view into the progress of the extraction. Let's prepare
        // the user for this CPU-heavy step if it looks like it'll take some
        // time to do so.
        if dl.total.get() < ByteSize::kb(400).0 {
            self.tick(WhyTick::DownloadFinished)?;
        } else {
            self.tick(WhyTick::Extracting(&dl.id.name()))?;
        }

        // Inform the original source that the download is finished which
        // should allow us to actually get the package and fill it in now.
        let mut sources = self.set.sources.borrow_mut();
        let source = sources
            .get_mut(dl.id.source_id())
            .ok_or_else(|| internal(format!("couldn't find source for `{}`", dl.id)))?;
        let start = Instant::now();
        let pkg = source.finish_download(dl.id, data)?;

        // Assume that no time has passed while we were calling
        // `finish_download`, update all speed checks and timeout limits of all
        // active downloads to make sure they don't fire because of a slowly
        // extracted tarball.
        let finish_dur = start.elapsed();
        self.updated_at.set(self.updated_at.get() + finish_dur);
        self.next_speed_check
            .set(self.next_speed_check.get() + finish_dur);

        // Fill the package's LazyCell slot; `fill` only fails if already
        // filled, which would be a bug (each id downloads once).
        let slot = &self.set.packages[&dl.id];
        assert!(slot.fill(pkg).is_ok());
        Ok(slot.borrow().unwrap())
    }
937 | ||
938 | fn enqueue(&mut self, dl: Download<'cfg>, handle: Easy) -> CargoResult<()> { | |
939 | let mut handle = self.set.multi.add(handle)?; | |
940 | let now = Instant::now(); | |
941 | handle.set_token(dl.token)?; | |
942 | self.updated_at.set(now); | |
943 | self.next_speed_check.set(now + self.timeout.dur); | |
944 | self.next_speed_check_bytes_threshold | |
945 | .set(u64::from(self.timeout.low_speed_limit)); | |
946 | dl.timed_out.set(None); | |
947 | dl.current.set(0); | |
948 | dl.total.set(0); | |
949 | self.pending.insert(dl.token, (dl, handle)); | |
950 | Ok(()) | |
951 | } | |
952 | ||
    /// Block, waiting for curl. Returns a token and a `Result` for that token
    /// (`Ok` means the download successfully finished).
    fn wait_for_curl(&mut self) -> CargoResult<(usize, Result<(), curl::Error>)> {
        // This is the main workhorse loop. We use libcurl's portable `wait`
        // method to actually perform blocking. This isn't necessarily too
        // efficient in terms of fd management, but we should only be juggling
        // a few anyway.
        //
        // Here we start off by asking the `multi` handle to do some work via
        // the `perform` method. This will actually do I/O work (non-blocking)
        // and attempt to make progress. Afterwards we ask about the `messages`
        // contained in the handle which will inform us if anything has finished
        // transferring.
        //
        // If we've got a finished transfer after all that work we break out
        // and process the finished transfer at the end. Otherwise we need to
        // actually block waiting for I/O to happen, which we achieve with the
        // `wait` method on `multi`.
        loop {
            // Re-enqueue any retries whose backoff has elapsed.
            self.add_sleepers()?;
            // `perform` may fire our progress callback, which needs access to
            // `self`; `tls::set` publishes it via a thread-local for the
            // duration of the call.
            let n = tls::set(self, || {
                self.set
                    .multi
                    .perform()
                    .with_context(|| "failed to perform http requests")
            })?;
            debug!(target: "network", "handles remaining: {}", n);
            // Borrow these fields separately so the `messages` closure can use
            // them while `self.set.multi` is also borrowed.
            let results = &mut self.results;
            let pending = &self.pending;
            self.set.multi.messages(|msg| {
                let token = msg.token().expect("failed to read token");
                let handle = &pending[&token].1;
                if let Some(result) = msg.result_for(handle) {
                    results.push((token, result));
                } else {
                    debug!(target: "network", "message without a result (?)");
                }
            });

            if let Some(pair) = results.pop() {
                break Ok(pair);
            }
            // Caller contract: something must still be in flight or sleeping,
            // otherwise we'd block forever.
            assert_ne!(self.remaining(), 0);
            if self.pending.is_empty() {
                // Nothing active in curl; just sleep until the next retry is
                // due.
                let delay = self.sleeping.time_to_next().unwrap();
                debug!(target: "network", "sleeping main thread for {delay:?}");
                std::thread::sleep(delay);
            } else {
                // Cap the wait at one second so this loop runs `add_sleepers`
                // (and timeout checks) reasonably often even if curl would
                // allow a longer wait.
                let min_timeout = Duration::new(1, 0);
                let timeout = self.set.multi.get_timeout()?.unwrap_or(min_timeout);
                let timeout = timeout.min(min_timeout);
                self.set
                    .multi
                    .wait(&mut [], timeout)
                    .with_context(|| "failed to wait on curl `Multi`")?;
            }
        }
    }
1011 | ||
1012 | fn add_sleepers(&mut self) -> CargoResult<()> { | |
1013 | for (dl, handle) in self.sleeping.to_retry() { | |
1014 | self.pending_ids.insert(dl.id); | |
1015 | self.enqueue(dl, handle)?; | |
1016 | } | |
1017 | Ok(()) | |
1018 | } | |
1019 | ||
    /// Curl progress callback for the transfer identified by `token`.
    ///
    /// `total`/`cur` are curl's expected and received byte counts. Returns
    /// `true` to let the transfer continue, or `false` to abort it (which
    /// surfaces in `wait` as an "aborted by callback" error, explained by the
    /// message stored in `timed_out`).
    fn progress(&self, token: usize, total: u64, cur: u64) -> bool {
        let dl = &self.pending[&token].0;
        dl.total.set(total);
        let now = Instant::now();
        // Only count forward progress; curl may report the same (or a stale)
        // byte count across calls.
        if cur > dl.current.get() {
            let delta = cur - dl.current.get();
            let threshold = self.next_speed_check_bytes_threshold.get();

            dl.current.set(cur);
            self.updated_at.set(now);

            if delta >= threshold {
                // Enough data arrived: push the speed check out and reset the
                // byte threshold to the configured low-speed limit.
                self.next_speed_check.set(now + self.timeout.dur);
                self.next_speed_check_bytes_threshold
                    .set(u64::from(self.timeout.low_speed_limit));
            } else {
                // Partial progress: shrink the remaining threshold.
                self.next_speed_check_bytes_threshold.set(threshold - delta);
            }
        }
        // A failed progress print (e.g. broken output) aborts the transfer.
        if self.tick(WhyTick::DownloadUpdate).is_err() {
            return false;
        }

        // If we've spent too long not actually receiving any data we time out.
        if now > self.updated_at.get() + self.timeout.dur {
            self.updated_at.set(now);
            let msg = format!(
                "failed to download any data for `{}` within {}s",
                dl.id,
                self.timeout.dur.as_secs()
            );
            dl.timed_out.set(Some(msg));
            return false;
        }

        // If we reached the point in time that we need to check our speed
        // limit, see if we've transferred enough data during this threshold. If
        // it fails this check then we fail because the download is going too
        // slowly.
        if now >= self.next_speed_check.get() {
            self.next_speed_check.set(now + self.timeout.dur);
            assert!(self.next_speed_check_bytes_threshold.get() > 0);
            let msg = format!(
                "download of `{}` failed to transfer more \
                 than {} bytes in {}s",
                dl.id,
                self.timeout.low_speed_limit,
                self.timeout.dur.as_secs()
            );
            dl.timed_out.set(Some(msg));
            return false;
        }

        true
    }
1075 | ||
1076 | fn tick(&self, why: WhyTick<'_>) -> CargoResult<()> { | |
1077 | let mut progress = self.progress.borrow_mut(); | |
1078 | let progress = progress.as_mut().unwrap(); | |
1079 | ||
1080 | if let WhyTick::DownloadUpdate = why { | |
1081 | if !progress.update_allowed() { | |
1082 | return Ok(()); | |
1083 | } | |
1084 | } | |
1085 | let pending = self.remaining(); | |
1086 | let mut msg = if pending == 1 { | |
1087 | format!("{} crate", pending) | |
1088 | } else { | |
1089 | format!("{} crates", pending) | |
1090 | }; | |
1091 | match why { | |
1092 | WhyTick::Extracting(krate) => { | |
1093 | msg.push_str(&format!(", extracting {} ...", krate)); | |
1094 | } | |
1095 | _ => { | |
1096 | let mut dur = Duration::new(0, 0); | |
1097 | let mut remaining = 0; | |
1098 | for (dl, _) in self.pending.values() { | |
1099 | dur += dl.start.elapsed(); | |
1100 | // If the total/current look weird just throw out the data | |
1101 | // point, sounds like curl has more to learn before we have | |
1102 | // the true information. | |
1103 | if dl.total.get() >= dl.current.get() { | |
1104 | remaining += dl.total.get() - dl.current.get(); | |
1105 | } | |
1106 | } | |
1107 | if remaining > 0 && dur > Duration::from_millis(500) { | |
1108 | msg.push_str(&format!(", remaining bytes: {}", ByteSize(remaining))); | |
1109 | } | |
1110 | } | |
1111 | } | |
1112 | progress.print_now(&msg) | |
1113 | } | |
1114 | } | |
1115 | ||
/// Reason a progress-display refresh was requested; see `Downloads::tick`.
#[derive(Copy, Clone)]
enum WhyTick<'a> {
    /// A new download was just enqueued.
    DownloadStarted,
    /// Curl reported byte-level progress (`tick` throttles these updates).
    DownloadUpdate,
    /// A (small) download finished.
    DownloadFinished,
    /// The named crate is about to be synchronously extracted.
    Extracting(&'a str),
}
1123 | ||
1124 | impl<'a, 'cfg> Drop for Downloads<'a, 'cfg> { | |
1125 | fn drop(&mut self) { | |
1126 | self.set.downloading.set(false); | |
1127 | let progress = self.progress.get_mut().take().unwrap(); | |
1128 | // Don't print a download summary if we're not using a progress bar, | |
1129 | // we've already printed lots of `Downloading...` items. | |
1130 | if !progress.is_enabled() { | |
1131 | return; | |
1132 | } | |
1133 | // If we didn't download anything, no need for a summary. | |
1134 | if self.downloads_finished == 0 { | |
1135 | return; | |
1136 | } | |
1137 | // If an error happened, let's not clutter up the output. | |
1138 | if !self.success { | |
1139 | return; | |
1140 | } | |
1141 | // pick the correct plural of crate(s) | |
1142 | let crate_string = if self.downloads_finished == 1 { | |
1143 | "crate" | |
1144 | } else { | |
1145 | "crates" | |
1146 | }; | |
1147 | let mut status = format!( | |
1148 | "{} {} ({}) in {}", | |
1149 | self.downloads_finished, | |
1150 | crate_string, | |
1151 | ByteSize(self.downloaded_bytes), | |
1152 | util::elapsed(self.start.elapsed()) | |
1153 | ); | |
1154 | // print the size of largest crate if it was >1mb | |
1155 | // however don't print if only a single crate was downloaded | |
1156 | // because it is obvious that it will be the largest then | |
1157 | if self.largest.0 > ByteSize::mb(1).0 && self.downloads_finished > 1 { | |
1158 | status.push_str(&format!( | |
1159 | " (largest was `{}` at {})", | |
1160 | self.largest.1, | |
1161 | ByteSize(self.largest.0), | |
1162 | )); | |
1163 | } | |
1164 | // Clear progress before displaying final summary. | |
1165 | drop(progress); | |
1166 | drop(self.set.config.shell().status("Downloaded", status)); | |
1167 | } | |
1168 | } | |
1169 | ||
/// Thread-local plumbing that lets curl's C progress callback reach the
/// `Downloads` instance currently driving `Multi::perform`. The pointer is
/// smuggled as a `usize` (0 == "no active Downloads").
mod tls {
    use std::cell::Cell;

    use super::Downloads;

    // Raw pointer to the active `Downloads`, stored as usize; 0 means unset.
    thread_local!(static PTR: Cell<usize> = Cell::new(0));

    /// Runs `f` with the `Downloads` currently published by `set`, or `None`
    /// if no call to `set` is in progress on this thread.
    pub(crate) fn with<R>(f: impl FnOnce(Option<&Downloads<'_, '_>>) -> R) -> R {
        let ptr = PTR.with(|p| p.get());
        if ptr == 0 {
            f(None)
        } else {
            // SAFETY: a non-zero PTR is only ever written by `set`, which
            // keeps the borrowed `Downloads` alive for the duration of its
            // closure and restores the previous value (via `Reset`) before
            // returning, so the pointer is valid here on the same thread.
            unsafe { f(Some(&*(ptr as *const Downloads<'_, '_>))) }
        }
    }

    /// Publishes `dl` in the thread-local for the duration of `f`, restoring
    /// the previous value afterwards (even on panic, via the drop guard).
    pub(crate) fn set<R>(dl: &Downloads<'_, '_>, f: impl FnOnce() -> R) -> R {
        // Drop guard that restores the saved Cell value on scope exit.
        struct Reset<'a, T: Copy>(&'a Cell<T>, T);

        impl<'a, T: Copy> Drop for Reset<'a, T> {
            fn drop(&mut self) {
                self.0.set(self.1);
            }
        }

        PTR.with(|p| {
            let _reset = Reset(p, p.get());
            p.set(dl as *const Downloads<'_, '_> as usize);
            f()
        })
    }
}