1 use std
::cell
::{Cell, Ref, RefCell, RefMut}
;
2 use std
::cmp
::Ordering
;
3 use std
::collections
::{BTreeMap, BTreeSet, HashMap, HashSet}
;
7 use std
::path
::{Path, PathBuf}
;
9 use std
::time
::{Duration, Instant}
;
12 use bytesize
::ByteSize
;
13 use curl
::easy
::{Easy, HttpVersion}
;
14 use curl
::multi
::{EasyHandle, Multi}
;
15 use lazycell
::LazyCell
;
16 use log
::{debug, warn}
;
20 use crate::core
::compiler
::{CompileKind, RustcTargetData}
;
21 use crate::core
::dependency
::DepKind
;
22 use crate::core
::resolver
::features
::ForceAllTargets
;
23 use crate::core
::resolver
::{HasDevUnits, Resolve}
;
24 use crate::core
::source
::MaybePackage
;
25 use crate::core
::{Dependency, Manifest, PackageId, SourceId, Target}
;
26 use crate::core
::{SourceMap, Summary, Workspace}
;
28 use crate::util
::config
::PackageCacheLock
;
29 use crate::util
::errors
::{CargoResult, HttpNot200}
;
30 use crate::util
::interning
::InternedString
;
31 use crate::util
::network
::Retry
;
32 use crate::util
::{self, internal, Config, Progress, ProgressStyle}
;
/// Header prepended to every `Cargo.toml` that Cargo generates for
/// publishing, warning readers that the file is machine-normalized.
pub const MANIFEST_PREAMBLE: &str = "\
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# \"normalize\" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
";
47 /// Information about a package that is available somewhere in the file system.
49 /// A package is a `Cargo.toml` file plus all the files that are part of it.
51 // TODO: is `manifest_path` a relic?
54 inner
: Rc
<PackageInner
>,
59 /// The package's manifest.
61 /// The root of the package.
62 manifest_path
: PathBuf
,
65 impl Ord
for Package
{
66 fn cmp(&self, other
: &Package
) -> Ordering
{
67 self.package_id().cmp(&other
.package_id())
71 impl PartialOrd
for Package
{
72 fn partial_cmp(&self, other
: &Package
) -> Option
<Ordering
> {
77 /// A Package in a form where `Serialize` can be derived.
79 pub struct SerializedPackage
{
83 license
: Option
<String
>,
84 license_file
: Option
<String
>,
85 description
: Option
<String
>,
87 dependencies
: Vec
<Dependency
>,
89 features
: BTreeMap
<InternedString
, Vec
<InternedString
>>,
90 manifest_path
: PathBuf
,
91 metadata
: Option
<toml
::Value
>,
92 publish
: Option
<Vec
<String
>>,
94 categories
: Vec
<String
>,
95 keywords
: Vec
<String
>,
96 readme
: Option
<String
>,
97 repository
: Option
<String
>,
98 homepage
: Option
<String
>,
99 documentation
: Option
<String
>,
101 links
: Option
<String
>,
102 #[serde(skip_serializing_if = "Option::is_none")]
103 metabuild
: Option
<Vec
<String
>>,
104 default_run
: Option
<String
>,
105 rust_version
: Option
<String
>,
109 /// Creates a package from a manifest and its location.
110 pub fn new(manifest
: Manifest
, manifest_path
: &Path
) -> Package
{
112 inner
: Rc
::new(PackageInner
{
114 manifest_path
: manifest_path
.to_path_buf(),
119 /// Gets the manifest dependencies.
120 pub fn dependencies(&self) -> &[Dependency
] {
121 self.manifest().dependencies()
123 /// Gets the manifest.
124 pub fn manifest(&self) -> &Manifest
{
127 /// Gets the manifest.
128 pub fn manifest_mut(&mut self) -> &mut Manifest
{
129 &mut Rc
::make_mut(&mut self.inner
).manifest
131 /// Gets the path to the manifest.
132 pub fn manifest_path(&self) -> &Path
{
133 &self.inner
.manifest_path
135 /// Gets the name of the package.
136 pub fn name(&self) -> InternedString
{
137 self.package_id().name()
139 /// Gets the `PackageId` object for the package (fully defines a package).
140 pub fn package_id(&self) -> PackageId
{
141 self.manifest().package_id()
143 /// Gets the root folder of the package.
144 pub fn root(&self) -> &Path
{
145 self.manifest_path().parent().unwrap()
147 /// Gets the summary for the package.
148 pub fn summary(&self) -> &Summary
{
149 self.manifest().summary()
151 /// Gets the targets specified in the manifest.
152 pub fn targets(&self) -> &[Target
] {
153 self.manifest().targets()
155 /// Gets the library crate for this package, if it exists.
156 pub fn library(&self) -> Option
<&Target
> {
157 self.targets().iter().find(|t
| t
.is_lib())
159 /// Gets the current package version.
160 pub fn version(&self) -> &Version
{
161 self.package_id().version()
163 /// Gets the package authors.
164 pub fn authors(&self) -> &Vec
<String
> {
165 &self.manifest().metadata().authors
168 /// Returns `None` if the package is set to publish.
169 /// Returns `Some(allowed_registries)` if publishing is limited to specified
170 /// registries or if package is set to not publish.
171 pub fn publish(&self) -> &Option
<Vec
<String
>> {
172 self.manifest().publish()
174 /// Returns `true` if this package is a proc-macro.
175 pub fn proc_macro(&self) -> bool
{
176 self.targets().iter().any(|target
| target
.proc_macro())
178 /// Gets the package's minimum Rust version.
179 pub fn rust_version(&self) -> Option
<&str> {
180 self.manifest().rust_version()
183 /// Returns `true` if the package uses a custom build script for any target.
184 pub fn has_custom_build(&self) -> bool
{
185 self.targets().iter().any(|t
| t
.is_custom_build())
188 pub fn map_source(self, to_replace
: SourceId
, replace_with
: SourceId
) -> Package
{
190 inner
: Rc
::new(PackageInner
{
191 manifest
: self.manifest().clone().map_source(to_replace
, replace_with
),
192 manifest_path
: self.manifest_path().to_owned(),
197 pub fn to_registry_toml(&self, ws
: &Workspace
<'_
>) -> CargoResult
<String
> {
201 .prepare_for_publish(ws
, self.root())?
;
202 let toml
= toml
::to_string(&manifest
)?
;
203 Ok(format
!("{}\n{}", MANIFEST_PREAMBLE
, toml
))
206 /// Returns if package should include `Cargo.lock`.
207 pub fn include_lockfile(&self) -> bool
{
208 self.targets().iter().any(|t
| t
.is_example() || t
.is_bin())
211 pub fn serialized(&self) -> SerializedPackage
{
212 let summary
= self.manifest().summary();
213 let package_id
= summary
.package_id();
214 let manmeta
= self.manifest().metadata();
215 // Filter out metabuild targets. They are an internal implementation
216 // detail that is probably not relevant externally. There's also not a
217 // real path to show in `src_path`, and this avoids changing the format.
218 let targets
: Vec
<Target
> = self
222 .filter(|t
| t
.src_path().is_path())
225 // Convert Vec<FeatureValue> to Vec<InternedString>
226 let features
= summary
233 .map(|fv
| InternedString
::new(&fv
.to_string()))
240 name
: package_id
.name(),
241 version
: package_id
.version().clone(),
243 license
: manmeta
.license
.clone(),
244 license_file
: manmeta
.license_file
.clone(),
245 description
: manmeta
.description
.clone(),
246 source
: summary
.source_id(),
247 dependencies
: summary
.dependencies().to_vec(),
250 manifest_path
: self.manifest_path().to_path_buf(),
251 metadata
: self.manifest().custom_metadata().cloned(),
252 authors
: manmeta
.authors
.clone(),
253 categories
: manmeta
.categories
.clone(),
254 keywords
: manmeta
.keywords
.clone(),
255 readme
: manmeta
.readme
.clone(),
256 repository
: manmeta
.repository
.clone(),
257 homepage
: manmeta
.homepage
.clone(),
258 documentation
: manmeta
.documentation
.clone(),
259 edition
: self.manifest().edition().to_string(),
260 links
: self.manifest().links().map(|s
| s
.to_owned()),
261 metabuild
: self.manifest().metabuild().cloned(),
262 publish
: self.publish().as_ref().cloned(),
263 default_run
: self.manifest().default_run().map(|s
| s
.to_owned()),
264 rust_version
: self.rust_version().map(|s
| s
.to_owned()),
269 impl fmt
::Display
for Package
{
270 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
271 write
!(f
, "{}", self.summary().package_id())
275 impl fmt
::Debug
for Package
{
276 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
277 f
.debug_struct("Package")
278 .field("id", &self.summary().package_id())
284 impl PartialEq
for Package
{
285 fn eq(&self, other
: &Package
) -> bool
{
286 self.package_id() == other
.package_id()
290 impl Eq
for Package {}
292 impl hash
::Hash
for Package
{
293 fn hash
<H
: hash
::Hasher
>(&self, into
: &mut H
) {
294 self.package_id().hash(into
)
298 /// A set of packages, with the intent to download.
300 /// This is primarily used to convert a set of `PackageId`s to `Package`s. It
301 /// will download as needed, or used the cached download if available.
302 pub struct PackageSet
<'cfg
> {
303 packages
: HashMap
<PackageId
, LazyCell
<Package
>>,
304 sources
: RefCell
<SourceMap
<'cfg
>>,
305 config
: &'cfg Config
,
307 /// Used to prevent reusing the PackageSet to download twice.
308 downloading
: Cell
<bool
>,
309 /// Whether or not to use curl HTTP/2 multiplexing.
313 /// Helper for downloading crates.
314 pub struct Downloads
<'a
, 'cfg
> {
315 set
: &'a PackageSet
<'cfg
>,
316 /// When a download is started, it is added to this map. The key is a
317 /// "token" (see `Download::token`). It is removed once the download is
319 pending
: HashMap
<usize, (Download
<'cfg
>, EasyHandle
)>,
320 /// Set of packages currently being downloaded. This should stay in sync
322 pending_ids
: HashSet
<PackageId
>,
323 /// The final result of each download. A pair `(token, result)`. This is a
324 /// temporary holding area, needed because curl can report multiple
325 /// downloads at once, but the main loop (`wait`) is written to only
326 /// handle one at a time.
327 results
: Vec
<(usize, Result
<(), curl
::Error
>)>,
328 /// The next ID to use for creating a token (see `Download::token`).
331 progress
: RefCell
<Option
<Progress
<'cfg
>>>,
332 /// Number of downloads that have successfully finished.
333 downloads_finished
: usize,
334 /// Total bytes for all successfully downloaded packages.
335 downloaded_bytes
: u64,
336 /// Size (in bytes) and package name of the largest downloaded package.
337 largest
: (u64, String
),
338 /// Time when downloading started.
340 /// Indicates *all* downloads were successful.
343 /// Timeout management, both of timeout thresholds as well as whether or not
344 /// our connection has timed out (and accompanying message if it has).
346 /// Note that timeout management is done manually here instead of in libcurl
347 /// because we want to apply timeouts to an entire batch of operations, not
348 /// any one particular single operation.
349 timeout
: ops
::HttpTimeout
,
350 /// Last time bytes were received.
351 updated_at
: Cell
<Instant
>,
352 /// This is a slow-speed check. It is reset to `now + timeout_duration`
353 /// every time at least `threshold` bytes are received. If the current
354 /// time ever exceeds `next_speed_check`, then give up and report a
356 next_speed_check
: Cell
<Instant
>,
357 /// This is the slow-speed threshold byte count. It starts at the
358 /// configured threshold value (default 10), and is decremented by the
359 /// number of bytes received in each chunk. If it is <= zero, the
360 /// threshold has been met and data is being received fast enough not to
361 /// trigger a timeout; reset `next_speed_check` and set this back to the
362 /// configured threshold.
363 next_speed_check_bytes_threshold
: Cell
<u64>,
364 /// Global filesystem lock to ensure only one Cargo is downloading at a
366 _lock
: PackageCacheLock
<'cfg
>,
369 struct Download
<'cfg
> {
370 /// The token for this download, used as the key of the `Downloads::pending` map
371 /// and stored in `EasyHandle` as well.
374 /// The package that we're downloading.
377 /// Actual downloaded data, updated throughout the lifetime of this download.
378 data
: RefCell
<Vec
<u8>>,
380 /// The URL that we're downloading from, cached here for error messages and
384 /// A descriptive string to print when we've finished downloading this crate.
387 /// Statistics updated from the progress callback in libcurl.
391 /// The moment we started this transfer at.
393 timed_out
: Cell
<Option
<String
>>,
395 /// Logic used to track retrying this download if it's a spurious failure.
399 impl<'cfg
> PackageSet
<'cfg
> {
401 package_ids
: &[PackageId
],
402 sources
: SourceMap
<'cfg
>,
403 config
: &'cfg Config
,
404 ) -> CargoResult
<PackageSet
<'cfg
>> {
405 // We've enabled the `http2` feature of `curl` in Cargo, so treat
406 // failures here as fatal as it would indicate a build-time problem.
408 // Note that the multiplexing support is pretty new so we're having it
409 // off-by-default temporarily.
411 // Also note that pipelining is disabled as curl authors have indicated
412 // that it's buggy, and we've empirically seen that it's buggy with HTTP
414 let mut multi
= Multi
::new();
415 let multiplexing
= config
.http_config()?
.multiplexing
.unwrap_or(true);
417 .pipelining(false, multiplexing
)
418 .with_context(|| "failed to enable multiplexing/pipelining in curl")?
;
420 // let's not flood crates.io with connections
421 multi
.set_max_host_connections(2)?
;
424 packages
: package_ids
426 .map(|&id
| (id
, LazyCell
::new()))
428 sources
: RefCell
::new(sources
),
431 downloading
: Cell
::new(false),
436 pub fn package_ids(&self) -> impl Iterator
<Item
= PackageId
> + '_
{
437 self.packages
.keys().cloned()
440 pub fn packages(&self) -> impl Iterator
<Item
= &Package
> {
441 self.packages
.values().filter_map(|p
| p
.borrow())
444 pub fn enable_download
<'a
>(&'a
self) -> CargoResult
<Downloads
<'a
, 'cfg
>> {
445 assert
!(!self.downloading
.replace(true));
446 let timeout
= ops
::HttpTimeout
::new(self.config
)?
;
448 start
: Instant
::now(),
451 pending
: HashMap
::new(),
452 pending_ids
: HashSet
::new(),
454 progress
: RefCell
::new(Some(Progress
::with_style(
456 ProgressStyle
::Ratio
,
459 downloads_finished
: 0,
461 largest
: (0, String
::new()),
463 updated_at
: Cell
::new(Instant
::now()),
465 next_speed_check
: Cell
::new(Instant
::now()),
466 next_speed_check_bytes_threshold
: Cell
::new(0),
467 _lock
: self.config
.acquire_package_cache_lock()?
,
471 pub fn get_one(&self, id
: PackageId
) -> CargoResult
<&Package
> {
472 if let Some(pkg
) = self.packages
.get(&id
).and_then(|slot
| slot
.borrow()) {
475 Ok(self.get_many(Some(id
))?
.remove(0))
478 pub fn get_many(&self, ids
: impl IntoIterator
<Item
= PackageId
>) -> CargoResult
<Vec
<&Package
>> {
479 let mut pkgs
= Vec
::new();
480 let mut downloads
= self.enable_download()?
;
482 pkgs
.extend(downloads
.start(id
)?
);
484 while downloads
.remaining() > 0 {
485 pkgs
.push(downloads
.wait()?
);
487 downloads
.success
= true;
491 /// Downloads any packages accessible from the give root ids.
492 pub fn download_accessible(
495 root_ids
: &[PackageId
],
496 has_dev_units
: HasDevUnits
,
497 requested_kinds
: &[CompileKind
],
498 target_data
: &RustcTargetData
<'cfg
>,
499 force_all_targets
: ForceAllTargets
,
500 ) -> CargoResult
<()> {
501 fn collect_used_deps(
502 used
: &mut BTreeSet
<PackageId
>,
505 has_dev_units
: HasDevUnits
,
506 requested_kinds
: &[CompileKind
],
507 target_data
: &RustcTargetData
<'_
>,
508 force_all_targets
: ForceAllTargets
,
509 ) -> CargoResult
<()> {
510 if !used
.insert(pkg_id
) {
513 let filtered_deps
= PackageSet
::filter_deps(
521 for pkg_id
in filtered_deps
{
535 // This is sorted by PackageId to get consistent behavior and error
536 // messages for Cargo's testsuite. Perhaps there is a better ordering
537 // that optimizes download time?
538 let mut to_download
= BTreeSet
::new();
551 self.get_many(to_download
.into_iter())?
;
555 /// Check if there are any dependency packages that do not have any libs.
556 pub(crate) fn no_lib_pkgs(
559 root_ids
: &[PackageId
],
560 has_dev_units
: HasDevUnits
,
561 requested_kinds
: &[CompileKind
],
562 target_data
: &RustcTargetData
<'_
>,
563 force_all_targets
: ForceAllTargets
,
564 ) -> BTreeMap
<PackageId
, Vec
<&Package
>> {
568 let pkgs
= PackageSet
::filter_deps(
576 .filter_map(|package_id
| {
577 if let Ok(dep_pkg
) = self.get_one(package_id
) {
578 if !dep_pkg
.targets().iter().any(|t
| t
.is_lib()) {
595 resolve
: &'a Resolve
,
596 has_dev_units
: HasDevUnits
,
597 requested_kinds
: &'a
[CompileKind
],
598 target_data
: &'a RustcTargetData
<'_
>,
599 force_all_targets
: ForceAllTargets
,
600 ) -> impl Iterator
<Item
= PackageId
> + 'a
{
603 .filter(move |&(_id
, deps
)| {
604 deps
.iter().any(|dep
| {
605 if dep
.kind() == DepKind
::Development
&& has_dev_units
== HasDevUnits
::No
{
608 if force_all_targets
== ForceAllTargets
::No
{
609 let activated
= requested_kinds
611 .chain(Some(&CompileKind
::Host
))
612 .any(|kind
| target_data
.dep_platform_activated(dep
, *kind
));
620 .map(|(pkg_id
, _
)| pkg_id
)
624 pub fn sources(&self) -> Ref
<'_
, SourceMap
<'cfg
>> {
625 self.sources
.borrow()
628 pub fn sources_mut(&self) -> RefMut
<'_
, SourceMap
<'cfg
>> {
629 self.sources
.borrow_mut()
632 /// Merge the given set into self.
633 pub fn add_set(&mut self, set
: PackageSet
<'cfg
>) {
634 assert
!(!self.downloading
.get());
635 assert
!(!set
.downloading
.get());
636 for (pkg_id
, p_cell
) in set
.packages
{
637 self.packages
.entry(pkg_id
).or_insert(p_cell
);
639 let mut sources
= self.sources
.borrow_mut();
640 let other_sources
= set
.sources
.into_inner();
641 sources
.add_source_map(other_sources
);
645 // When dynamically linked against libcurl, we want to ignore some failures
646 // when using old versions that don't support certain features.
// When dynamically linked against libcurl, we want to ignore some failures
// when using old versions that don't support certain features. On macOS
// (where Cargo links the system libcurl) such errors are logged and
// ignored; elsewhere they are fatal, since Cargo builds curl itself there.
macro_rules! try_old_curl {
    ($e:expr, $msg:expr) => {
        let result = $e;
        if cfg!(target_os = "macos") {
            if let Err(e) = result {
                warn!("ignoring libcurl {} error: {}", $msg, e);
            }
        } else {
            result.with_context(|| {
                anyhow::format_err!("failed to enable {}, is curl not built right?", $msg)
            })?;
        }
    };
}
662 impl<'a
, 'cfg
> Downloads
<'a
, 'cfg
> {
663 /// Starts to download the package for the `id` specified.
665 /// Returns `None` if the package is queued up for download and will
666 /// eventually be returned from `wait_for_download`. Returns `Some(pkg)` if
667 /// the package is ready and doesn't need to be downloaded.
668 pub fn start(&mut self, id
: PackageId
) -> CargoResult
<Option
<&'a Package
>> {
670 .with_context(|| format
!("failed to download `{}`", id
))
673 fn start_inner(&mut self, id
: PackageId
) -> CargoResult
<Option
<&'a Package
>> {
674 // First up see if we've already cached this package, in which case
675 // there's nothing to do.
680 .ok_or_else(|| internal(format
!("couldn't find `{}` in package set", id
)))?
;
681 if let Some(pkg
) = slot
.borrow() {
682 return Ok(Some(pkg
));
685 // Ask the original source fo this `PackageId` for the corresponding
686 // package. That may immediately come back and tell us that the package
687 // is ready, or it could tell us that it needs to be downloaded.
688 let mut sources
= self.set
.sources
.borrow_mut();
690 .get_mut(id
.source_id())
691 .ok_or_else(|| internal(format
!("couldn't find source for `{}`", id
)))?
;
694 .with_context(|| "unable to get packages from source")?
;
695 let (url
, descriptor
) = match pkg
{
696 MaybePackage
::Ready(pkg
) => {
697 debug
!("{} doesn't need a download", id
);
698 assert
!(slot
.fill(pkg
).is_ok());
699 return Ok(Some(slot
.borrow().unwrap()));
701 MaybePackage
::Download { url, descriptor }
=> (url
, descriptor
),
704 // Ok we're going to download this crate, so let's set up all our
705 // internal state and hand off an `Easy` handle to our libcurl `Multi`
706 // handle. This won't actually start the transfer, but later it'll
707 // happen during `wait_for_download`
708 let token
= self.next
;
710 debug
!("downloading {} as {}", id
, token
);
711 assert
!(self.pending_ids
.insert(id
));
713 let (mut handle
, _timeout
) = ops
::http_handle_and_timeout(self.set
.config
)?
;
716 handle
.follow_location(true)?
; // follow redirects
718 // Enable HTTP/2 to be used as it'll allow true multiplexing which makes
719 // downloads much faster.
721 // Currently Cargo requests the `http2` feature of the `curl` crate
722 // which means it should always be built in. On OSX, however, we ship
723 // cargo still linked against the system libcurl. Building curl with
724 // ALPN support for HTTP/2 requires newer versions of OSX (the
725 // SecureTransport API) than we want to ship Cargo for. By linking Cargo
726 // against the system libcurl then older curl installations won't use
727 // HTTP/2 but newer ones will. All that to basically say we ignore
728 // errors here on OSX, but consider this a fatal error to not activate
729 // HTTP/2 on all other platforms.
730 if self.set
.multiplexing
{
731 try_old_curl
!(handle
.http_version(HttpVersion
::V2
), "HTTP2");
733 handle
.http_version(HttpVersion
::V11
)?
;
736 // This is an option to `libcurl` which indicates that if there's a
737 // bunch of parallel requests to the same host they all wait until the
738 // pipelining status of the host is known. This means that we won't
739 // initiate dozens of connections to crates.io, but rather only one.
740 // Once the main one is opened we realized that pipelining is possible
741 // and multiplexing is possible with static.crates.io. All in all this
742 // reduces the number of connections done to a more manageable state.
743 try_old_curl
!(handle
.pipewait(true), "pipewait");
745 handle
.write_function(move |buf
| {
746 debug
!("{} - {} bytes of data", token
, buf
.len());
747 tls
::with(|downloads
| {
748 if let Some(downloads
) = downloads
{
749 downloads
.pending
[&token
]
753 .extend_from_slice(buf
);
759 handle
.progress(true)?
;
760 handle
.progress_function(move |dl_total
, dl_cur
, _
, _
| {
761 tls
::with(|downloads
| match downloads
{
762 Some(d
) => d
.progress(token
, dl_total
as u64, dl_cur
as u64),
767 // If the progress bar isn't enabled then it may be awhile before the
768 // first crate finishes downloading so we inform immediately that we're
769 // downloading crates here.
770 if self.downloads_finished
== 0
771 && self.pending
.is_empty()
772 && !self.progress
.borrow().as_ref().unwrap().is_enabled()
777 .status("Downloading", "crates ...")?
;
782 data
: RefCell
::new(Vec
::new()),
787 current
: Cell
::new(0),
788 start
: Instant
::now(),
789 timed_out
: Cell
::new(None
),
790 retry
: Retry
::new(self.set
.config
)?
,
792 self.enqueue(dl
, handle
)?
;
793 self.tick(WhyTick
::DownloadStarted
)?
;
798 /// Returns the number of crates that are still downloading.
799 pub fn remaining(&self) -> usize {
803 /// Blocks the current thread waiting for a package to finish downloading.
805 /// This method will wait for a previously enqueued package to finish
806 /// downloading and return a reference to it after it's done downloading.
810 /// This function will panic if there are no remaining downloads.
811 pub fn wait(&mut self) -> CargoResult
<&'a Package
> {
812 let (dl
, data
) = loop {
813 assert_eq
!(self.pending
.len(), self.pending_ids
.len());
814 let (token
, result
) = self.wait_for_curl()?
;
815 debug
!("{} finished with {:?}", token
, result
);
817 let (mut dl
, handle
) = self
820 .expect("got a token for a non-in-progress transfer");
821 let data
= mem
::take(&mut *dl
.data
.borrow_mut());
822 let mut handle
= self.set
.multi
.remove(handle
)?
;
823 self.pending_ids
.remove(&dl
.id
);
825 // Check if this was a spurious error. If it was a spurious error
826 // then we want to re-enqueue our request for another attempt and
827 // then we wait for another request to finish.
829 let timed_out
= &dl
.timed_out
;
833 if let Err(e
) = result
{
834 // If this error is "aborted by callback" then that's
835 // probably because our progress callback aborted due to
836 // a timeout. We'll find out by looking at the
837 // `timed_out` field, looking for a descriptive message.
838 // If one is found we switch the error code (to ensure
839 // it's flagged as spurious) and then attach our extra
840 // information to the error.
841 if !e
.is_aborted_by_callback() {
842 return Err(e
.into());
845 return Err(match timed_out
.replace(None
) {
847 let code
= curl_sys
::CURLE_OPERATION_TIMEDOUT
;
848 let mut err
= curl
::Error
::new(code
);
857 let code
= handle
.response_code()?
;
858 if code
!= 200 && code
!= 0 {
859 let url
= handle
.effective_url()?
.unwrap_or(url
);
860 return Err(HttpNot200
{
862 url
: url
.to_string(),
868 .with_context(|| format
!("failed to download from `{}`", dl
.url
))?
871 Some(()) => break (dl
, data
),
873 self.pending_ids
.insert(dl
.id
);
874 self.enqueue(dl
, handle
)?
879 // If the progress bar isn't enabled then we still want to provide some
880 // semblance of progress of how we're downloading crates, and if the
881 // progress bar is enabled this provides a good log of what's happening.
882 self.progress
.borrow_mut().as_mut().unwrap().clear();
886 .status("Downloaded", &dl
.descriptor
)?
;
888 self.downloads_finished
+= 1;
889 self.downloaded_bytes
+= dl
.total
.get();
890 if dl
.total
.get() > self.largest
.0 {
891 self.largest
= (dl
.total
.get(), dl
.id
.name().to_string());
894 // We're about to synchronously extract the crate below. While we're
895 // doing that our download progress won't actually be updated, nor do we
896 // have a great view into the progress of the extraction. Let's prepare
897 // the user for this CPU-heavy step if it looks like it'll take some
899 if dl
.total
.get() < ByteSize
::kb(400).0 {
900 self.tick(WhyTick
::DownloadFinished
)?
;
902 self.tick(WhyTick
::Extracting(&dl
.id
.name()))?
;
905 // Inform the original source that the download is finished which
906 // should allow us to actually get the package and fill it in now.
907 let mut sources
= self.set
.sources
.borrow_mut();
909 .get_mut(dl
.id
.source_id())
910 .ok_or_else(|| internal(format
!("couldn't find source for `{}`", dl
.id
)))?
;
911 let start
= Instant
::now();
912 let pkg
= source
.finish_download(dl
.id
, data
)?
;
914 // Assume that no time has passed while we were calling
915 // `finish_download`, update all speed checks and timeout limits of all
916 // active downloads to make sure they don't fire because of a slowly
917 // extracted tarball.
918 let finish_dur
= start
.elapsed();
919 self.updated_at
.set(self.updated_at
.get() + finish_dur
);
920 self.next_speed_check
921 .set(self.next_speed_check
.get() + finish_dur
);
923 let slot
= &self.set
.packages
[&dl
.id
];
924 assert
!(slot
.fill(pkg
).is_ok());
925 Ok(slot
.borrow().unwrap())
928 fn enqueue(&mut self, dl
: Download
<'cfg
>, handle
: Easy
) -> CargoResult
<()> {
929 let mut handle
= self.set
.multi
.add(handle
)?
;
930 let now
= Instant
::now();
931 handle
.set_token(dl
.token
)?
;
932 self.updated_at
.set(now
);
933 self.next_speed_check
.set(now
+ self.timeout
.dur
);
934 self.next_speed_check_bytes_threshold
935 .set(u64::from(self.timeout
.low_speed_limit
));
936 dl
.timed_out
.set(None
);
939 self.pending
.insert(dl
.token
, (dl
, handle
));
943 /// Block, waiting for curl. Returns a token and a `Result` for that token
944 /// (`Ok` means the download successfully finished).
945 fn wait_for_curl(&mut self) -> CargoResult
<(usize, Result
<(), curl
::Error
>)> {
946 // This is the main workhorse loop. We use libcurl's portable `wait`
947 // method to actually perform blocking. This isn't necessarily too
948 // efficient in terms of fd management, but we should only be juggling
951 // Here we start off by asking the `multi` handle to do some work via
952 // the `perform` method. This will actually do I/O work (non-blocking)
953 // and attempt to make progress. Afterwards we ask about the `messages`
954 // contained in the handle which will inform us if anything has finished
957 // If we've got a finished transfer after all that work we break out
958 // and process the finished transfer at the end. Otherwise we need to
959 // actually block waiting for I/O to happen, which we achieve with the
960 // `wait` method on `multi`.
962 let n
= tls
::set(self, || {
966 .with_context(|| "failed to perform http requests")
968 debug
!("handles remaining: {}", n
);
969 let results
= &mut self.results
;
970 let pending
= &self.pending
;
971 self.set
.multi
.messages(|msg
| {
972 let token
= msg
.token().expect("failed to read token");
973 let handle
= &pending
[&token
].1;
974 if let Some(result
) = msg
.result_for(handle
) {
975 results
.push((token
, result
));
977 debug
!("message without a result (?)");
981 if let Some(pair
) = results
.pop() {
984 assert
!(!self.pending
.is_empty());
989 .unwrap_or_else(|| Duration
::new(5, 0));
992 .wait(&mut [], timeout
)
993 .with_context(|| "failed to wait on curl `Multi`")?
;
997 fn progress(&self, token
: usize, total
: u64, cur
: u64) -> bool
{
998 let dl
= &self.pending
[&token
].0;
1000 let now
= Instant
::now();
1001 if cur
> dl
.current
.get() {
1002 let delta
= cur
- dl
.current
.get();
1003 let threshold
= self.next_speed_check_bytes_threshold
.get();
1005 dl
.current
.set(cur
);
1006 self.updated_at
.set(now
);
1008 if delta
>= threshold
{
1009 self.next_speed_check
.set(now
+ self.timeout
.dur
);
1010 self.next_speed_check_bytes_threshold
1011 .set(u64::from(self.timeout
.low_speed_limit
));
1013 self.next_speed_check_bytes_threshold
.set(threshold
- delta
);
1016 if self.tick(WhyTick
::DownloadUpdate
).is_err() {
1020 // If we've spent too long not actually receiving any data we time out.
1021 if now
> self.updated_at
.get() + self.timeout
.dur
{
1022 self.updated_at
.set(now
);
1024 "failed to download any data for `{}` within {}s",
1026 self.timeout
.dur
.as_secs()
1028 dl
.timed_out
.set(Some(msg
));
1032 // If we reached the point in time that we need to check our speed
1033 // limit, see if we've transferred enough data during this threshold. If
1034 // it fails this check then we fail because the download is going too
1036 if now
>= self.next_speed_check
.get() {
1037 self.next_speed_check
.set(now
+ self.timeout
.dur
);
1038 assert
!(self.next_speed_check_bytes_threshold
.get() > 0);
1040 "download of `{}` failed to transfer more \
1041 than {} bytes in {}s",
1043 self.timeout
.low_speed_limit
,
1044 self.timeout
.dur
.as_secs()
1046 dl
.timed_out
.set(Some(msg
));
1053 fn tick(&self, why
: WhyTick
<'_
>) -> CargoResult
<()> {
1054 let mut progress
= self.progress
.borrow_mut();
1055 let progress
= progress
.as_mut().unwrap();
1057 if let WhyTick
::DownloadUpdate
= why
{
1058 if !progress
.update_allowed() {
1062 let pending
= self.pending
.len();
1063 let mut msg
= if pending
== 1 {
1064 format
!("{} crate", pending
)
1066 format
!("{} crates", pending
)
1069 WhyTick
::Extracting(krate
) => {
1070 msg
.push_str(&format
!(", extracting {} ...", krate
));
1073 let mut dur
= Duration
::new(0, 0);
1074 let mut remaining
= 0;
1075 for (dl
, _
) in self.pending
.values() {
1076 dur
+= dl
.start
.elapsed();
1077 // If the total/current look weird just throw out the data
1078 // point, sounds like curl has more to learn before we have
1079 // the true information.
1080 if dl
.total
.get() >= dl
.current
.get() {
1081 remaining
+= dl
.total
.get() - dl
.current
.get();
1084 if remaining
> 0 && dur
> Duration
::from_millis(500) {
1085 msg
.push_str(&format
!(", remaining bytes: {}", ByteSize(remaining
)));
1089 progress
.print_now(&msg
)
/// Reason a progress tick is being emitted; `Extracting` carries the name
/// of the crate being unpacked.
#[derive(Copy, Clone)]
enum WhyTick<'a> {
    DownloadStarted,
    DownloadUpdate,
    DownloadFinished,
    Extracting(&'a str),
}
1101 impl<'a
, 'cfg
> Drop
for Downloads
<'a
, 'cfg
> {
1102 fn drop(&mut self) {
1103 self.set
.downloading
.set(false);
1104 let progress
= self.progress
.get_mut().take().unwrap();
1105 // Don't print a download summary if we're not using a progress bar,
1106 // we've already printed lots of `Downloading...` items.
1107 if !progress
.is_enabled() {
1110 // If we didn't download anything, no need for a summary.
1111 if self.downloads_finished
== 0 {
1114 // If an error happened, let's not clutter up the output.
1118 // pick the correct plural of crate(s)
1119 let crate_string
= if self.downloads_finished
== 1 {
1124 let mut status
= format
!(
1126 self.downloads_finished
,
1128 ByteSize(self.downloaded_bytes
),
1129 util
::elapsed(self.start
.elapsed())
1131 // print the size of largest crate if it was >1mb
1132 // however don't print if only a single crate was downloaded
1133 // because it is obvious that it will be the largest then
1134 if self.largest
.0 > ByteSize
::mb(1).0 && self.downloads_finished
> 1 {
1135 status
.push_str(&format
!(
1136 " (largest was `{}` at {})",
1138 ByteSize(self.largest
.0),
1141 // Clear progress before displaying final summary.
1143 drop(self.set
.config
.shell().status("Downloaded", status
));
1148 use std
::cell
::Cell
;
1150 use super::Downloads
;
1152 thread_local
!(static PTR
: Cell
<usize> = Cell
::new(0));
1154 pub(crate) fn with
<R
>(f
: impl FnOnce(Option
<&Downloads
<'_
, '_
>>) -> R
) -> R
{
1155 let ptr
= PTR
.with(|p
| p
.get());
1159 unsafe { f(Some(&*(ptr as *const Downloads<'_, '_>))) }
1163 pub(crate) fn set
<R
>(dl
: &Downloads
<'_
, '_
>, f
: impl FnOnce() -> R
) -> R
{
1164 struct Reset
<'a
, T
: Copy
>(&'a Cell
<T
>, T
);
1166 impl<'a
, T
: Copy
> Drop
for Reset
<'a
, T
> {
1167 fn drop(&mut self) {
1173 let _reset
= Reset(p
, p
.get());
1174 p
.set(dl
as *const Downloads
<'_
, '_
> as usize);