})
.collect();
- // Query the sources for available versions, mapping `package_ids` into `summaries`.
+ // Query the sources for new versions, mapping `package_ids` into `summaries`.
let mut summaries = Vec::new();
while !package_ids.is_empty() {
package_ids.retain(|&pkg_id| {
use crate::sources::config::SourceConfigMap;
use crate::util::errors::CargoResult;
use crate::util::interning::InternedString;
-use crate::util::{profile, CanonicalUrl, Config};
+use crate::util::{CanonicalUrl, Config};
use anyhow::{bail, Context as _};
use log::{debug, trace};
use url::Url;
}
self.load(namespace, kind)?;
+ self.block_until_ready()?;
Ok(())
}
}
fn load(&mut self, source_id: SourceId, kind: Kind) -> CargoResult<()> {
- (|| {
- debug!("loading source {}", source_id);
- let source = self.source_config.load(source_id, &self.yanked_whitelist)?;
- assert_eq!(source.source_id(), source_id);
-
- if kind == Kind::Override {
- self.overrides.push(source_id);
- }
- self.add_source(source, kind);
+ debug!("loading source {}", source_id);
+ let source = self
+ .source_config
+ .load(source_id, &self.yanked_whitelist)
+ .with_context(|| format!("Unable to update {}", source_id))?;
+ assert_eq!(source.source_id(), source_id);
+
+ if kind == Kind::Override {
+ self.overrides.push(source_id);
+ }
+ self.add_source(source, kind);
- // Ensure the source has fetched all necessary remote data.
- let _p = profile::start(format!("updating: {}", source_id));
- self.sources.get_mut(source_id).unwrap().update()
- })()
- .with_context(|| format!("Unable to update {}", source_id))?;
+ // If we have an imprecise version then we don't know what we're going
+ // to look for, so we always attempt to perform an update here.
+ //
+ // If we have a precise version, then we'll update lazily during the
+ // querying phase. Note that precise in this case is only
+ // `Some("locked")` as other `Some` values indicate a `cargo update
+ // --precise` request
+ if source_id.precise() != Some("locked") {
+ self.sources.get_mut(source_id).unwrap().invalidate_cache();
+ } else {
+ debug!("skipping update due to locked registry");
+ }
Ok(())
}
- fn query_overrides(&mut self, dep: &Dependency) -> CargoResult<Option<Summary>> {
+ fn query_overrides(&mut self, dep: &Dependency) -> Poll<CargoResult<Option<Summary>>> {
for &s in self.overrides.iter() {
let src = self.sources.get_mut(s).unwrap();
let dep = Dependency::new_override(dep.package_name(), s);
- match src.query_vec(&dep)? {
- Poll::Ready(mut results) => {
- if !results.is_empty() {
- return Ok(Some(results.remove(0)));
- }
- }
- Poll::Pending => panic!("overrides have to be on path deps, how did we get here?"),
+ let mut results = match src.query_vec(&dep) {
+ Poll::Ready(results) => results?,
+ Poll::Pending => return Poll::Pending,
+ };
+ if !results.is_empty() {
+ return Poll::Ready(Ok(Some(results.remove(0))));
}
}
- Ok(None)
+ Poll::Ready(Ok(None))
}
/// This function is used to transform a summary to another locked summary
assert!(self.patches_locked);
let (override_summary, n, to_warn) = {
// Look for an override and get ready to query the real source.
- let override_summary = self.query_overrides(dep)?;
+ let override_summary = match self.query_overrides(dep) {
+ Poll::Ready(override_summary) => override_summary?,
+ Poll::Pending => return Poll::Pending,
+ };
// Next up on our list of candidates is to check the `[patch]`
// section of the manifest. Here we look through all patches
}
fn block_until_ready(&mut self) -> CargoResult<()> {
- for (_, source) in self.sources.sources_mut() {
- source.block_until_ready()?;
+ for (source_id, source) in self.sources.sources_mut() {
+ source
+ .block_until_ready()
+ .with_context(|| format!("Unable to update {}", source_id))?;
}
Ok(())
}
.iter()
.map(|s| format!(" * {}", s.package_id()))
.collect::<Vec<_>>();
- return Err(anyhow::anyhow!(
+ return Poll::Ready(Err(anyhow::anyhow!(
"the replacement specification `{}` matched \
multiple packages:\n * {}\n{}",
spec,
s.package_id(),
bullets.join("\n")
- ))
- .into();
+ )));
}
// The dependency should be hard-coded to have the same name and an
// Make sure no duplicates
if let Some(&(ref spec, _)) = potential_matches.next() {
- return Err(anyhow::anyhow!(
+ return Poll::Ready(Err(anyhow::anyhow!(
"overlapping replacement specifications found:\n\n \
* {}\n * {}\n\nboth specifications match: {}",
matched_spec,
spec,
summary.package_id()
- ))
- .into();
+ )));
}
for dep in summary.dependencies() {
Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))),
Poll::Pending => {
all_ready = false;
- None // we can ignore Pending deps, resolve will be repeatedly called until there are none to ignore
+ // we can ignore Pending deps, resolve will be repeatedly called
+ // until there are none to ignore
+ None
}
Poll::Ready(Err(e)) => Some(Err(e).with_context(|| {
format!(
// give an error message that nothing was found.
//
// Maybe the user mistyped the ver_req? Like `dep="2"` when `dep="0.2"`
- // was meant. So we re-query the registry with `deb="*"` so we can
+ // was meant. So we re-query the registry with `dep="*"` so we can
// list a few versions that were actually found.
let all_req = semver::VersionReq::parse("*").unwrap();
let mut new_dep = dep.clone();
new_dep.set_version_req(all_req);
- let mut candidates = Vec::new();
- // TODO: we can ignore the `Pending` case because we are just in an error reporting path,
- // and we have probably already triggered the query anyway. But, if we start getting reports
- // of confusing errors that go away when called again this is a place to look.
- if let Poll::Ready(Err(e)) = registry.query(&new_dep, &mut |s| candidates.push(s), false) {
- return to_resolve_err(e);
+ let mut candidates = loop {
+ match registry.query_vec(&new_dep, false) {
+ Poll::Ready(Ok(candidates)) => break candidates,
+ Poll::Ready(Err(e)) => return to_resolve_err(e),
+ Poll::Pending => match registry.block_until_ready() {
+ Ok(()) => continue,
+ Err(e) => return to_resolve_err(e),
+ },
+ }
};
+
candidates.sort_unstable_by(|a, b| b.version().cmp(a.version()));
- let mut msg = if !candidates.is_empty() {
- let versions = {
- let mut versions = candidates
- .iter()
- .take(3)
- .map(|cand| cand.version().to_string())
- .collect::<Vec<_>>();
+ let mut msg =
+ if !candidates.is_empty() {
+ let versions = {
+ let mut versions = candidates
+ .iter()
+ .take(3)
+ .map(|cand| cand.version().to_string())
+ .collect::<Vec<_>>();
- if candidates.len() > 3 {
- versions.push("...".into());
- }
+ if candidates.len() > 3 {
+ versions.push("...".into());
+ }
- versions.join(", ")
- };
+ versions.join(", ")
+ };
- let mut msg = format!(
- "failed to select a version for the requirement `{} = \"{}\"`\n\
+ let mut msg = format!(
+ "failed to select a version for the requirement `{} = \"{}\"`\n\
candidate versions found which didn't match: {}\n\
location searched: {}\n",
- dep.package_name(),
- dep.version_req(),
- versions,
- registry.describe_source(dep.source_id()),
- );
- msg.push_str("required by ");
- msg.push_str(&describe_path_in_context(cx, &parent.package_id()));
+ dep.package_name(),
+ dep.version_req(),
+ versions,
+ registry.describe_source(dep.source_id()),
+ );
+ msg.push_str("required by ");
+ msg.push_str(&describe_path_in_context(cx, &parent.package_id()));
- // If we have a path dependency with a locked version, then this may
- // indicate that we updated a sub-package and forgot to run `cargo
- // update`. In this case try to print a helpful error!
- if dep.source_id().is_path() && dep.version_req().to_string().starts_with('=') {
- msg.push_str(
- "\nconsider running `cargo update` to update \
+ // If we have a path dependency with a locked version, then this may
+ // indicate that we updated a sub-package and forgot to run `cargo
+ // update`. In this case try to print a helpful error!
+ if dep.source_id().is_path() && dep.version_req().to_string().starts_with('=') {
+ msg.push_str(
+ "\nconsider running `cargo update` to update \
a path dependency's locked version",
- );
- }
+ );
+ }
- if registry.is_replaced(dep.source_id()) {
- msg.push_str("\nperhaps a crate was updated and forgotten to be re-vendored?");
- }
+ if registry.is_replaced(dep.source_id()) {
+ msg.push_str("\nperhaps a crate was updated and forgotten to be re-vendored?");
+ }
- msg
- } else {
- // Maybe the user mistyped the name? Like `dep-thing` when `Dep_Thing`
- // was meant. So we try asking the registry for a `fuzzy` search for suggestions.
- let mut candidates = Vec::new();
- if let Poll::Ready(Err(e)) = registry.query(&new_dep, &mut |s| candidates.push(s), true) {
- // TODO: we can ignore the `Pending` case because we are just in an error reporting path,
- // and we have probably already triggered the query anyway. But, if we start getting reports
- // of confusing errors that go away when called again this is a place to look.
- return to_resolve_err(e);
- };
- candidates.sort_unstable_by_key(|a| a.name());
- candidates.dedup_by(|a, b| a.name() == b.name());
- let mut candidates: Vec<_> = candidates
- .iter()
- .map(|n| (lev_distance(&*new_dep.package_name(), &*n.name()), n))
- .filter(|&(d, _)| d < 4)
- .collect();
- candidates.sort_by_key(|o| o.0);
- let mut msg: String;
- if candidates.is_empty() {
- msg = format!("no matching package named `{}` found\n", dep.package_name());
+ msg
} else {
- msg = format!(
- "no matching package found\nsearched package name: `{}`\n",
- dep.package_name()
- );
+ // Maybe the user mistyped the name? Like `dep-thing` when `Dep_Thing`
+ // was meant. So we try asking the registry for a `fuzzy` search for suggestions.
+ let mut candidates = loop {
+ match registry.query_vec(&new_dep, true) {
+ Poll::Ready(Ok(candidates)) => break candidates,
+ Poll::Ready(Err(e)) => return to_resolve_err(e),
+ Poll::Pending => match registry.block_until_ready() {
+ Ok(()) => continue,
+ Err(e) => return to_resolve_err(e),
+ },
+ }
+ };
- // If dependency package name is equal to the name of the candidate here
- // it may be a prerelease package which hasn't been specified correctly
- if dep.package_name() == candidates[0].1.name()
- && candidates[0].1.package_id().version().is_prerelease()
- {
- msg.push_str("prerelease package needs to be specified explicitly\n");
- msg.push_str(&format!(
- "{name} = {{ version = \"{version}\" }}",
- name = candidates[0].1.name(),
- version = candidates[0].1.package_id().version()
- ));
+ candidates.sort_unstable_by_key(|a| a.name());
+ candidates.dedup_by(|a, b| a.name() == b.name());
+ let mut candidates: Vec<_> = candidates
+ .iter()
+ .map(|n| (lev_distance(&*new_dep.package_name(), &*n.name()), n))
+ .filter(|&(d, _)| d < 4)
+ .collect();
+ candidates.sort_by_key(|o| o.0);
+ let mut msg: String;
+ if candidates.is_empty() {
+ msg = format!("no matching package named `{}` found\n", dep.package_name());
} else {
- let mut names = candidates
- .iter()
- .take(3)
- .map(|c| c.1.name().as_str())
- .collect::<Vec<_>>();
+ msg = format!(
+ "no matching package found\nsearched package name: `{}`\n",
+ dep.package_name()
+ );
- if candidates.len() > 3 {
- names.push("...");
- }
- // Vertically align first suggestion with missing crate name
- // so a typo jumps out at you.
- msg.push_str("perhaps you meant: ");
- msg.push_str(
- &names
+ // If dependency package name is equal to the name of the candidate here
+ // it may be a prerelease package which hasn't been specified correctly
+ if dep.package_name() == candidates[0].1.name()
+ && candidates[0].1.package_id().version().is_prerelease()
+ {
+ msg.push_str("prerelease package needs to be specified explicitly\n");
+ msg.push_str(&format!(
+ "{name} = {{ version = \"{version}\" }}",
+ name = candidates[0].1.name(),
+ version = candidates[0].1.package_id().version()
+ ));
+ } else {
+ let mut names = candidates
.iter()
- .enumerate()
- .fold(String::default(), |acc, (i, el)| match i {
+ .take(3)
+ .map(|c| c.1.name().as_str())
+ .collect::<Vec<_>>();
+
+ if candidates.len() > 3 {
+ names.push("...");
+ }
+ // Vertically align first suggestion with missing crate name
+ // so a typo jumps out at you.
+ msg.push_str("perhaps you meant: ");
+ msg.push_str(&names.iter().enumerate().fold(
+ String::default(),
+ |acc, (i, el)| match i {
0 => acc + el,
i if names.len() - 1 == i && candidates.len() <= 3 => acc + " or " + el,
_ => acc + ", " + el,
- }),
- );
+ },
+ ));
+ }
+ msg.push('\n');
}
- msg.push('\n');
- }
- msg.push_str(&format!("location searched: {}\n", dep.source_id()));
- msg.push_str("required by ");
- msg.push_str(&describe_path_in_context(cx, &parent.package_id()));
+ msg.push_str(&format!("location searched: {}\n", dep.source_id()));
+ msg.push_str("required by ");
+ msg.push_str(&describe_path_in_context(cx, &parent.package_id()));
- msg
- };
+ msg
+ };
if let Some(config) = config {
if config.offline() {
self.query(dep, &mut |s| ret.push(s)).map_ok(|_| ret)
}
- /// Performs any network operations required to get the entire list of all names,
- /// versions and dependencies of packages managed by the `Source`.
- fn update(&mut self) -> CargoResult<()>;
+ /// Ensure that the source is fully up-to-date for the current session on the next query.
+ fn invalidate_cache(&mut self);
/// Fetches the full package for each name and version specified.
fn download(&mut self, package: PackageId) -> CargoResult<MaybePackage>;
(**self).fuzzy_query(dep, f)
}
- /// Forwards to `Source::update`.
- fn update(&mut self) -> CargoResult<()> {
- (**self).update()
+ fn invalidate_cache(&mut self) {
+ (**self).invalidate_cache()
}
/// Forwards to `Source::download`.
(**self).fuzzy_query(dep, f)
}
- fn update(&mut self) -> CargoResult<()> {
- (**self).update()
+ fn invalidate_cache(&mut self) {
+ (**self).invalidate_cache()
}
fn download(&mut self, id: PackageId) -> CargoResult<MaybePackage> {
use crate::core::compiler::{BuildConfig, CompileMode, DefaultExecutor, Executor};
use crate::core::resolver::CliFeatures;
use crate::core::{Feature, Shell, Verbosity, Workspace};
-use crate::core::{Package, PackageId, PackageSet, Resolve, Source, SourceId};
+use crate::core::{Package, PackageId, PackageSet, Resolve, SourceId};
use crate::sources::PathSource;
use crate::util::errors::CargoResult;
use crate::util::toml::TomlManifest;
let _lock = config.acquire_package_cache_lock()?;
if needs_update {
- source.update()?;
+ source.invalidate_cache();
}
let deps = loop {
// with other global Cargos
let _lock = config.acquire_package_cache_lock()?;
- source.update()?;
+ source.invalidate_cache();
return if let Some(dep) = dep {
select_dep_pkg(source, dep, config, false)
use std::iter::repeat;
use std::path::PathBuf;
use std::str;
+use std::task::Poll;
use std::time::Duration;
use std::{cmp, env};
let _lock = config.acquire_package_cache_lock()?;
let mut src = RegistrySource::remote(sid, &HashSet::new(), config);
// Only update the index if the config is not available or `force` is set.
- let cfg = src.config();
- let mut updated_cfg = || {
- src.update()
- .with_context(|| format!("failed to update {}", sid))?;
- src.config()
- };
-
- let cfg = if force_update {
- updated_cfg()?
- } else {
- cfg.or_else(|_| updated_cfg())?
+ if force_update {
+ src.invalidate_cache()
+ }
+ let cfg = loop {
+ match src.config()? {
+ Poll::Pending => src
+ .block_until_ready()
+ .with_context(|| format!("failed to update {}", sid))?,
+ Poll::Ready(cfg) => break cfg,
+ }
};
cfg.and_then(|cfg| cfg.api)
};
use crate::core::summary::Summary;
use crate::core::Feature;
-use crate::core::{
- GitReference, PackageId, PackageIdSpec, PackageSet, Source, SourceId, Workspace,
-};
+use crate::core::{GitReference, PackageId, PackageIdSpec, PackageSet, SourceId, Workspace};
use crate::ops;
use crate::sources::PathSource;
use crate::util::errors::CargoResult;
root: PathBuf,
packages: HashMap<PackageId, (Package, Checksum)>,
config: &'cfg Config,
+ updated: bool,
}
#[derive(Deserialize)]
root: path.to_path_buf(),
config,
packages: HashMap::new(),
+ updated: false,
}
}
-}
-
-impl<'cfg> Debug for DirectorySource<'cfg> {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- write!(f, "DirectorySource {{ root: {:?} }}", self.root)
- }
-}
-
-impl<'cfg> Source for DirectorySource<'cfg> {
- fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> Poll<CargoResult<()>> {
- let packages = self.packages.values().map(|p| &p.0);
- let matches = packages.filter(|pkg| dep.matches(pkg.summary()));
- for summary in matches.map(|pkg| pkg.summary().clone()) {
- f(summary);
- }
- Poll::Ready(Ok(()))
- }
-
- fn fuzzy_query(
- &mut self,
- _dep: &Dependency,
- f: &mut dyn FnMut(Summary),
- ) -> Poll<CargoResult<()>> {
- let packages = self.packages.values().map(|p| &p.0);
- for summary in packages.map(|pkg| pkg.summary().clone()) {
- f(summary);
- }
- Poll::Ready(Ok(()))
- }
-
- fn supports_checksums(&self) -> bool {
- true
- }
-
- fn requires_precise(&self) -> bool {
- true
- }
-
- fn source_id(&self) -> SourceId {
- self.source_id
- }
fn update(&mut self) -> CargoResult<()> {
self.packages.clear();
Ok(())
}
+}
+
+impl<'cfg> Debug for DirectorySource<'cfg> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ write!(f, "DirectorySource {{ root: {:?} }}", self.root)
+ }
+}
+
+impl<'cfg> Source for DirectorySource<'cfg> {
+ fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> Poll<CargoResult<()>> {
+ if !self.updated {
+ return Poll::Pending;
+ }
+ let packages = self.packages.values().map(|p| &p.0);
+ let matches = packages.filter(|pkg| dep.matches(pkg.summary()));
+ for summary in matches.map(|pkg| pkg.summary().clone()) {
+ f(summary);
+ }
+ Poll::Ready(Ok(()))
+ }
+
+ fn fuzzy_query(
+ &mut self,
+ _dep: &Dependency,
+ f: &mut dyn FnMut(Summary),
+ ) -> Poll<CargoResult<()>> {
+ if !self.updated {
+ return Poll::Pending;
+ }
+ let packages = self.packages.values().map(|p| &p.0);
+ for summary in packages.map(|pkg| pkg.summary().clone()) {
+ f(summary);
+ }
+ Poll::Ready(Ok(()))
+ }
+
+ fn supports_checksums(&self) -> bool {
+ true
+ }
+
+ fn requires_precise(&self) -> bool {
+ true
+ }
+
+ fn source_id(&self) -> SourceId {
+ self.source_id
+ }
fn download(&mut self, id: PackageId) -> CargoResult<MaybePackage> {
self.packages
}
fn block_until_ready(&mut self) -> CargoResult<()> {
+ self.update()?;
+ self.updated = true;
Ok(())
}
+
+ fn invalidate_cache(&mut self) {}
}
path_source: Option<PathSource<'cfg>>,
ident: String,
config: &'cfg Config,
+ updated: bool,
}
impl<'cfg> GitSource<'cfg> {
path_source: None,
ident,
config,
+ updated: false,
};
Ok(source)
}
self.path_source.as_mut().unwrap().read_packages()
}
-}
-
-fn ident(id: &SourceId) -> String {
- let ident = id
- .canonical_url()
- .raw_canonicalized_url()
- .path_segments()
- .and_then(|s| s.rev().next())
- .unwrap_or("");
-
- let ident = if ident.is_empty() { "_empty" } else { ident };
-
- format!("{}-{}", ident, short_hash(id.canonical_url()))
-}
-impl<'cfg> Debug for GitSource<'cfg> {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- write!(f, "git repo at {}", self.remote.url())?;
-
- match self.manifest_reference.pretty_ref() {
- Some(s) => write!(f, " ({})", s),
- None => Ok(()),
+ fn update(&mut self) -> CargoResult<()> {
+ if self.updated {
+ return Ok(());
}
- }
-}
-
-impl<'cfg> Source for GitSource<'cfg> {
- fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> Poll<CargoResult<()>> {
- let src = self
- .path_source
- .as_mut()
- .expect("BUG: `update()` must be called before `query()`");
- src.query(dep, f)
- }
-
- fn fuzzy_query(
- &mut self,
- dep: &Dependency,
- f: &mut dyn FnMut(Summary),
- ) -> Poll<CargoResult<()>> {
- let src = self
- .path_source
- .as_mut()
- .expect("BUG: `update()` must be called before `query()`");
- src.fuzzy_query(dep, f)
- }
-
- fn supports_checksums(&self) -> bool {
- false
- }
- fn requires_precise(&self) -> bool {
- true
- }
-
- fn source_id(&self) -> SourceId {
- self.source_id
- }
-
- fn update(&mut self) -> CargoResult<()> {
let git_path = self.config.git_path();
let git_path = self.config.assert_package_cache_locked(&git_path);
let db_path = git_path.join("db").join(&self.ident);
// doesn't have it.
(locked_rev, db) => {
if self.config.offline() {
- anyhow::bail!(
+ return Err(anyhow::anyhow!(
"can't checkout from '{}': you are in the offline mode (--offline)",
self.remote.url()
- );
+ ));
}
self.config.shell().status(
"Updating",
self.path_source = Some(path_source);
self.locked_rev = Some(actual_rev);
- self.path_source.as_mut().unwrap().update()
+ self.path_source.as_mut().unwrap().update()?;
+ self.updated = true;
+ Ok(())
+ }
+}
+
+fn ident(id: &SourceId) -> String {
+ let ident = id
+ .canonical_url()
+ .raw_canonicalized_url()
+ .path_segments()
+ .and_then(|s| s.rev().next())
+ .unwrap_or("");
+
+ let ident = if ident.is_empty() { "_empty" } else { ident };
+
+ format!("{}-{}", ident, short_hash(id.canonical_url()))
+}
+
+impl<'cfg> Debug for GitSource<'cfg> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ write!(f, "git repo at {}", self.remote.url())?;
+
+ match self.manifest_reference.pretty_ref() {
+ Some(s) => write!(f, " ({})", s),
+ None => Ok(()),
+ }
+ }
+}
+
+impl<'cfg> Source for GitSource<'cfg> {
+ fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> Poll<CargoResult<()>> {
+ if let Some(src) = self.path_source.as_mut() {
+ src.query(dep, f)
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn fuzzy_query(
+ &mut self,
+ dep: &Dependency,
+ f: &mut dyn FnMut(Summary),
+ ) -> Poll<CargoResult<()>> {
+ if let Some(src) = self.path_source.as_mut() {
+ src.fuzzy_query(dep, f)
+ } else {
+ Poll::Pending
+ }
+ }
+
+ fn supports_checksums(&self) -> bool {
+ false
+ }
+
+ fn requires_precise(&self) -> bool {
+ true
+ }
+
+ fn source_id(&self) -> SourceId {
+ self.source_id
}
fn download(&mut self, id: PackageId) -> CargoResult<MaybePackage> {
}
fn block_until_ready(&mut self) -> CargoResult<()> {
- Ok(())
+ self.update()
+ }
+
+ fn invalidate_cache(&mut self) {
+ self.updated = false;
}
}
pub fn path(&self) -> &Path {
&self.path
}
+
+ pub fn update(&mut self) -> CargoResult<()> {
+ if !self.updated {
+ let packages = self.read_packages()?;
+ self.packages.extend(packages.into_iter());
+ self.updated = true;
+ }
+
+ Ok(())
+ }
}
impl<'cfg> Debug for PathSource<'cfg> {
impl<'cfg> Source for PathSource<'cfg> {
fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> Poll<CargoResult<()>> {
+ if !self.updated {
+ return Poll::Pending;
+ }
for s in self.packages.iter().map(|p| p.summary()) {
if dep.matches(s) {
f(s.clone())
_dep: &Dependency,
f: &mut dyn FnMut(Summary),
) -> Poll<CargoResult<()>> {
+ if !self.updated {
+ return Poll::Pending;
+ }
for s in self.packages.iter().map(|p| p.summary()) {
f(s.clone())
}
self.source_id
}
- fn update(&mut self) -> CargoResult<()> {
- if !self.updated {
- let packages = self.read_packages()?;
- self.packages.extend(packages.into_iter());
- self.updated = true;
- }
-
- Ok(())
- }
-
fn download(&mut self, id: PackageId) -> CargoResult<MaybePackage> {
trace!("getting packages; id={}", id);
}
fn block_until_ready(&mut self) -> CargoResult<()> {
- Ok(())
+ self.update()
}
+
+ fn invalidate_cache(&mut self) {}
}
// than the registry itself. In effect this is intended to be a quite
// cheap operation.
let summaries = match self.load_summaries(name, load)? {
- Poll::Ready(x) => x,
+ Poll::Ready(summaries) => summaries,
Poll::Pending => return Poll::Pending,
};
// structures.
load.prepare()?;
- // let root = self.config.assert_package_cache_locked(&self.path);
let root = load.assert_index_locked(&self.path);
let cache_root = root.join(".cache");
let index_version = load.current_version();
let raw_path = make_dep_path(&fs_name, false);
let mut any_pending = false;
-
// Attempt to handle misspellings by searching for a chain of related
// names to the original `raw_path` name. Only return summaries
// associated with the first hit, however. The resolver will later
Poll::Ready(Ok(self.summaries_cache.get_mut(&name).unwrap()))
}
+ /// Clears the in-memory summaries cache.
+ pub fn clear_summaries_cache(&mut self) {
+ self.summaries_cache.clear();
+ }
+
pub fn query_inner(
&mut self,
dep: &Dependency,
let source_id = self.source_id;
let summaries = match self.summaries(dep.package_name(), dep.version_req(), load)? {
- Poll::Ready(x) => x,
+ Poll::Ready(summaries) => summaries,
Poll::Pending => return Poll::Pending,
};
}
}
- // This is the fallback path where we actually talk to libgit2 to load
+ // This is the fallback path where we actually talk to the registry backend to load
// information. Here we parse every single line in the index (as we need
// to find the versions)
log::debug!("slow path for {:?}", relative);
cache_bytes = Some(cache.serialize(index_version));
}
Ok(())
- });
+ })?;
if matches!(err, Poll::Pending) {
assert!(!hit_closure);
debug_assert!(cache_contents.is_none());
return Poll::Ready(Ok(None));
}
- let _ = err?;
// If we've got debug assertions enabled and the cache was previously
// present and considered fresh this is where the debug assertions
root: Filesystem,
src_path: Filesystem,
config: &'cfg Config,
+ updated: bool,
}
impl<'cfg> LocalRegistry<'cfg> {
index_path: Filesystem::new(root.join("index")),
root: Filesystem::new(root.to_path_buf()),
config,
+ updated: false,
}
}
}
path: &Path,
data: &mut dyn FnMut(&[u8]) -> CargoResult<()>,
) -> Poll<CargoResult<()>> {
- Poll::Ready(Ok(data(&paths::read_bytes(&root.join(path))?)?))
+ if self.updated {
+ Poll::Ready(Ok(data(&paths::read_bytes(&root.join(path))?)?))
+ } else {
+ Poll::Pending
+ }
}
- fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
+ fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>> {
// Local registries don't have configuration for remote APIs or anything
// like that
- Ok(None)
+ Poll::Ready(Ok(None))
}
- fn update_index(&mut self) -> CargoResult<()> {
- // Nothing to update, we just use what's on disk. Verify it actually
- // exists though. We don't use any locks as we're just checking whether
- // these directories exist.
- let root = self.root.clone().into_path_unlocked();
- if !root.is_dir() {
- anyhow::bail!("local registry path is not a directory: {}", root.display())
- }
- let index_path = self.index_path.clone().into_path_unlocked();
- if !index_path.is_dir() {
- anyhow::bail!(
- "local registry index path is not a directory: {}",
- index_path.display()
- )
+ fn block_until_ready(&mut self) -> CargoResult<()> {
+ if !self.updated {
+ // Nothing to update, we just use what's on disk. Verify it actually
+ // exists though. We don't use any locks as we're just checking whether
+ // these directories exist.
+ let root = self.root.clone().into_path_unlocked();
+ if !root.is_dir() {
+ anyhow::bail!("local registry path is not a directory: {}", root.display());
+ }
+ let index_path = self.index_path.clone().into_path_unlocked();
+ if !index_path.is_dir() {
+ anyhow::bail!(
+ "local registry index path is not a directory: {}",
+ index_path.display()
+ );
+ }
+ self.updated = true;
}
Ok(())
}
+ fn invalidate_cache(&mut self) {
+ // Local registry has no cache - just reads from disk.
+ }
+
+ fn is_updated(&self) -> bool {
+ self.updated
+ }
+
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock> {
let crate_file = format!("{}-{}.crate", pkg.name(), pkg.version());
) -> CargoResult<File> {
panic!("this source doesn't download")
}
-
- fn block_until_ready(&mut self) -> CargoResult<()> {
- Ok(())
- }
}
src_path: Filesystem,
/// Local reference to [`Config`] for convenience.
config: &'cfg Config,
- /// Whether or not the index has been updated.
- ///
- /// This is used as an optimization to avoid updating if not needed, such
- /// as `Cargo.lock` already exists and the index already contains the
- /// locked entries. Or, to avoid updating multiple times.
- ///
- /// Only remote registries really need to update. Local registries only
- /// check that the index exists.
- updated: bool,
/// Abstraction for interfacing to the different registry kinds.
ops: Box<dyn RegistryData + 'cfg>,
/// Interface for managing the on-disk index.
/// Loads the `config.json` file and returns it.
///
/// Local registries don't have a config, and return `None`.
- fn config(&mut self) -> CargoResult<Option<RegistryConfig>>;
+ fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>>;
- /// Updates the index.
+ /// Ensures the index is updated with the latest data.
///
- /// For a remote registry, this updates the index over the network. Local
- /// registries only check that the index exists.
- fn update_index(&mut self) -> CargoResult<()>;
+ /// Invalidates cached data.
+ fn invalidate_cache(&mut self);
+
+ /// Is the local cached data up-to-date?
+ fn is_updated(&self) -> bool;
/// Prepare to start downloading a `.crate` file.
///
src_path: config.registry_source_path().join(name),
config,
source_id,
- updated: false,
index: index::RegistryIndex::new(source_id, ops.index_path(), config),
yanked_whitelist: yanked_whitelist.clone(),
ops,
/// Decode the configuration stored within the registry.
///
/// This requires that the index has been at least checked out.
- pub fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
+ pub fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>> {
self.ops.config()
}
Ok(unpack_dir.to_path_buf())
}
- fn do_update(&mut self) -> CargoResult<()> {
- self.ops.update_index()?;
- let path = self.ops.index_path();
- self.index = index::RegistryIndex::new(self.source_id, path, self.config);
- self.updated = true;
- Ok(())
- }
-
fn get_pkg(&mut self, package: PackageId, path: &File) -> CargoResult<Package> {
let path = self
.unpack_package(package, path)
// theory the registry is known to contain this version. If, however, we
// come back with no summaries, then our registry may need to be
// updated, so we fall back to performing a lazy update.
- if dep.source_id().precise().is_some() && !self.updated {
+ if dep.source_id().precise().is_some() && !self.ops.is_updated() {
debug!("attempting query without update");
let mut called = false;
let pend =
return Poll::Ready(Ok(()));
} else {
debug!("falling back to an update");
- self.do_update()?;
+ self.invalidate_cache();
+ return Poll::Pending;
}
}
self.source_id
}
- fn update(&mut self) -> CargoResult<()> {
- // If we have an imprecise version then we don't know what we're going
- // to look for, so we always attempt to perform an update here.
- //
- // If we have a precise version, then we'll update lazily during the
- // querying phase. Note that precise in this case is only
- // `Some("locked")` as other `Some` values indicate a `cargo update
- // --precise` request
- if self.source_id.precise() != Some("locked") {
- self.do_update()?;
- } else {
- debug!("skipping update due to locked registry");
- }
- Ok(())
+ fn invalidate_cache(&mut self) {
+ self.index.clear_summaries_cache();
+ self.ops.invalidate_cache();
}
fn download(&mut self, package: PackageId) -> CargoResult<MaybePackage> {
}
fn is_yanked(&mut self, pkg: PackageId) -> CargoResult<bool> {
- if !self.updated {
- self.do_update()?;
- }
+ self.invalidate_cache();
loop {
match self.index.is_yanked(pkg, &mut *self.ops)? {
- Poll::Ready(yanked) => {
- return Ok(yanked);
- }
- Poll::Pending => {
- self.block_until_ready()?;
- }
+ Poll::Ready(yanked) => return Ok(yanked),
+ Poll::Pending => self.block_until_ready()?,
}
}
}
};
use crate::util::errors::CargoResult;
use crate::util::interning::InternedString;
-use crate::util::network::PollExt;
use crate::util::{Config, Filesystem};
use anyhow::Context as _;
use cargo_util::{paths, registry::make_dep_path, Sha256};
repo: LazyCell<git2::Repository>,
head: Cell<Option<git2::Oid>>,
current_sha: Cell<Option<InternedString>>,
+ needs_update: Cell<bool>,
+ updated: bool,
}
impl<'cfg> RemoteRegistry<'cfg> {
repo: LazyCell::new(),
head: Cell::new(None),
current_sha: Cell::new(None),
+ needs_update: Cell::new(false),
+ updated: false,
}
}
/// File name of the downloaded `.crate` tarball for `pkg`,
/// e.g. `serde-1.0.0.crate`.
fn filename(&self, pkg: PackageId) -> String {
    format!("{}-{}.crate", pkg.name(), pkg.version())
}
+
+ fn update_index_worker(&mut self) -> CargoResult<()> {
+ if self.config.offline() {
+ return Ok(());
+ }
+ if self.config.cli_unstable().no_index_update {
+ return Ok(());
+ }
+ // Make sure the index is only updated once per session since it is an
+ // expensive operation. This generally only happens when the resolver
+ // is run multiple times, such as during `cargo publish`.
+ if self.config.updated_sources().contains(&self.source_id) {
+ return Ok(());
+ }
+
+ debug!("updating the index");
+
+ // Ensure that we'll actually be able to acquire an HTTP handle later on
+ // once we start trying to download crates. This will weed out any
+ // problems with `.cargo/config` configuration related to HTTP.
+ //
+ // This way if there's a problem the error gets printed before we even
+ // hit the index, which may not actually read this configuration.
+ self.config.http()?;
+
+ self.prepare()?;
+ self.head.set(None);
+ *self.tree.borrow_mut() = None;
+ self.current_sha.set(None);
+ let path = self.config.assert_package_cache_locked(&self.index_path);
+ self.config
+ .shell()
+ .status("Updating", self.source_id.display_index())?;
+
+ // Fetch the latest version of our `index_git_ref` into the index
+ // checkout.
+ let url = self.source_id.url();
+ let repo = self.repo.borrow_mut().unwrap();
+ git::fetch(repo, url.as_str(), &self.index_git_ref, self.config)
+ .with_context(|| format!("failed to fetch `{}`", url))?;
+ self.config.updated_sources().insert(self.source_id);
+
+ // Create a dummy file to record the mtime for when we updated the
+ // index.
+ paths::create(&path.join(LAST_UPDATED_FILE))?;
+
+ Ok(())
+ }
}
const LAST_UPDATED_FILE: &str = ".last-updated";
path: &Path,
data: &mut dyn FnMut(&[u8]) -> CargoResult<()>,
) -> Poll<CargoResult<()>> {
+ if self.needs_update.get() {
+ return Poll::Pending;
+ }
+
// Note that the index calls this method and the filesystem is locked
// in the index, so we don't need to worry about an `update_index`
// happening in a different process.
- let repo = self.repo()?;
- let tree = self.tree()?;
- let entry = tree.get_path(path)?;
- let object = entry.to_object(repo)?;
- let blob = match object.as_blob() {
- Some(blob) => blob,
- None => {
- return Err(anyhow::anyhow!(
- "path `{}` is not a blob in the git repo",
- path.display()
- ))
- .into()
+ let mut not_found = false;
+ let result = || -> CargoResult<CargoResult<()>> {
+ let repo = self.repo()?;
+ let tree = self.tree()?;
+ let entry = tree.get_path(path);
+ if let Some(e) = entry.as_ref().err() {
+ not_found = e.code() == git2::ErrorCode::NotFound;
}
- };
- Poll::Ready(Ok(data(blob.content())?))
+ let entry = entry?;
+ let object = entry.to_object(repo)?;
+ let blob = match object.as_blob() {
+ Some(blob) => blob,
+ None => anyhow::bail!("path `{}` is not a blob in the git repo", path.display()),
+ };
+ Ok(data(blob.content()))
+ }();
+
+ match result {
+ Ok(result) => Poll::Ready(result),
+ Err(_) if !self.updated => {
+ // If git returns an error and we haven't updated the repo, return
+ // pending to allow an update to try again.
+ self.needs_update.set(true);
+ Poll::Pending
+ }
+ Err(e)
+ if e.downcast_ref::<git2::Error>()
+ .map(|e| e.code() == git2::ErrorCode::NotFound)
+ .unwrap_or_default() =>
+ {
+ // The repo has been updated and the file does not exist.
+ Poll::Ready(Ok(()))
+ }
+ Err(e) => Poll::Ready(Err(e)),
+ }
}
- fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
+ fn config(&mut self) -> Poll<CargoResult<Option<RegistryConfig>>> {
debug!("loading config");
self.prepare()?;
self.config.assert_package_cache_locked(&self.index_path);
let mut config = None;
- self.load(Path::new(""), Path::new("config.json"), &mut |json| {
+ match self.load(Path::new(""), Path::new("config.json"), &mut |json| {
config = Some(serde_json::from_slice(json)?);
Ok(())
- })
- .expect("git registries never return pending")?;
- trace!("config loaded");
- Ok(config)
+ })? {
+ Poll::Ready(()) => {
+ trace!("config loaded");
+ Poll::Ready(Ok(config))
+ }
+ Poll::Pending => Poll::Pending,
+ }
}
- fn update_index(&mut self) -> CargoResult<()> {
- if self.config.offline() {
- return Ok(());
+ fn invalidate_cache(&mut self) {
+ if !self.updated {
+ self.needs_update.set(true);
}
- if self.config.cli_unstable().no_index_update {
- return Ok(());
- }
- // Make sure the index is only updated once per session since it is an
- // expensive operation. This generally only happens when the resolver
- // is run multiple times, such as during `cargo publish`.
- if self.config.updated_sources().contains(&self.source_id) {
- return Ok(());
- }
-
- debug!("updating the index");
-
- // Ensure that we'll actually be able to acquire an HTTP handle later on
- // once we start trying to download crates. This will weed out any
- // problems with `.cargo/config` configuration related to HTTP.
- //
- // This way if there's a problem the error gets printed before we even
- // hit the index, which may not actually read this configuration.
- self.config.http()?;
-
- self.prepare()?;
- self.head.set(None);
- *self.tree.borrow_mut() = None;
- self.current_sha.set(None);
- let path = self.config.assert_package_cache_locked(&self.index_path);
- self.config
- .shell()
- .status("Updating", self.source_id.display_index())?;
-
- // Fetch the latest version of our `index_git_ref` into the index
- // checkout.
- let url = self.source_id.url();
- let repo = self.repo.borrow_mut().unwrap();
- git::fetch(repo, url.as_str(), &self.index_git_ref, self.config)
- .with_context(|| format!("failed to fetch `{}`", url))?;
- self.config.updated_sources().insert(self.source_id);
-
- // Create a dummy file to record the mtime for when we updated the
- // index.
- paths::create(&path.join(LAST_UPDATED_FILE))?;
+ }
- Ok(())
+ fn is_updated(&self) -> bool {
+ self.updated
}
fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult<MaybeLock> {
}
}
- let config = self.config()?.unwrap();
+ let config = loop {
+ match self.config()? {
+ Poll::Pending => self.block_until_ready()?,
+ Poll::Ready(cfg) => break cfg.unwrap(),
+ }
+ };
+
let mut url = config.dl;
if !url.contains(CRATE_TEMPLATE)
&& !url.contains(VERSION_TEMPLATE)
}
/// Drives any pending index update to completion. Flags are only
/// cleared after a successful fetch, so a failed update can be retried.
fn block_until_ready(&mut self) -> CargoResult<()> {
    if !self.needs_update.get() {
        return Ok(());
    }
    self.update_index_worker()?;
    self.needs_update.set(false);
    self.updated = true;
    Ok(())
}
}
})
}
- fn update(&mut self) -> CargoResult<()> {
- self.inner
- .update()
- .with_context(|| format!("failed to update replaced source {}", self.to_replace))?;
- Ok(())
+ fn invalidate_cache(&mut self) {
+ self.inner.invalidate_cache()
}
fn download(&mut self, id: PackageId) -> CargoResult<MaybePackage> {
}
/// Blocks on the replacement source, attributing any failure to the
/// source being replaced so the error message names the right registry.
fn block_until_ready(&mut self) -> CargoResult<()> {
    let ready = self.inner.block_until_ready();
    ready.with_context(|| format!("failed to update replaced source {}", self.to_replace))
}
}
);
let lock = cfg.acquire_package_cache_lock().unwrap();
let mut regsrc = RegistrySource::remote(sid, &HashSet::new(), &cfg);
- regsrc.update().unwrap();
+ regsrc.invalidate_cache();
+ regsrc.block_until_ready().unwrap();
drop(lock);
cargo_process("search postgres")