1 //! High-level APIs for executing the resolver.
3 //! This module provides functions for running the resolver given a workspace.
4 //! There are roughly 3 main functions:
6 //! - `resolve_ws`: A simple, high-level function with no options.
7 //! - `resolve_ws_with_opts`: A medium-level function with options like
8 //! user-provided features. This is the most appropriate function to use in most cases.
10 //! - `resolve_with_previous`: A low-level function for running the resolver,
11 //! providing the most power and flexibility.
13 use crate::core
::compiler
::{CompileKind, RustcTargetData}
;
14 use crate::core
::registry
::{LockedPatchDependency, PackageRegistry}
;
15 use crate::core
::resolver
::features
::{
16 CliFeatures
, FeatureOpts
, FeatureResolver
, ForceAllTargets
, RequestedFeatures
, ResolvedFeatures
,
18 use crate::core
::resolver
::{
19 self, HasDevUnits
, Resolve
, ResolveOpts
, ResolveVersion
, VersionPreferences
,
21 use crate::core
::summary
::Summary
;
22 use crate::core
::Feature
;
24 GitReference
, PackageId
, PackageIdSpec
, PackageSet
, Source
, SourceId
, Workspace
,
27 use crate::sources
::PathSource
;
28 use crate::util
::errors
::CargoResult
;
29 use crate::util
::{profile, CanonicalUrl}
;
30 use anyhow
::Context
as _
;
31 use log
::{debug, trace}
;
32 use std
::collections
::{HashMap, HashSet}
;
34 /// Result for `resolve_ws_with_opts`.
35 pub struct WorkspaceResolve
<'cfg
> {
36 /// Packages to be downloaded.
37 pub pkg_set
: PackageSet
<'cfg
>,
38 /// The resolve for the entire workspace.
40 /// This may be `None` for things like `cargo install` and `-Zavoid-dev-deps`.
41 /// This does not include `paths` overrides.
42 pub workspace_resolve
: Option
<Resolve
>,
43 /// The narrowed resolve, with the specific features enabled, and only the
44 /// given package specs requested.
45 pub targeted_resolve
: Resolve
,
46 /// The features activated per package.
47 pub resolved_features
: ResolvedFeatures
,
50 const UNUSED_PATCH_WARNING
: &str = "\
51 Check that the patched package version and available features are compatible
52 with the dependency requirements. If the patch has a different version from
53 what is locked in the Cargo.lock file, run `cargo update` to use the new
54 version. This may also occur with an optional dependency that is not enabled.";
56 /// Resolves all dependencies for the workspace using the previous
57 /// lock file as a guide if present.
59 /// This function will also write the result of resolution as a new lock file
60 /// (unless it is an ephemeral workspace such as `cargo install` or `cargo package`).
63 /// This is a simple interface used by commands like `clean`, `fetch`, and
64 /// `package`, which don't specify any options or features.
65 pub fn resolve_ws
<'a
>(ws
: &Workspace
<'a
>) -> CargoResult
<(PackageSet
<'a
>, Resolve
)> {
66 let mut registry
= PackageRegistry
::new(ws
.config())?
;
67 let resolve
= resolve_with_registry(ws
, &mut registry
)?
;
68 let packages
= get_resolved_packages(&resolve
, registry
)?
;
69 Ok((packages
, resolve
))
72 /// Resolves dependencies for some packages of the workspace,
73 /// taking into account `paths` overrides and activated features.
75 /// This function will also write the result of resolution as a new lock file
76 /// (unless `Workspace::require_optional_deps` is false, such as `cargo
77 /// install` or `-Z avoid-dev-deps`), or it is an ephemeral workspace (`cargo
78 /// install` or `cargo package`).
80 /// `specs` may be empty, which indicates it should resolve all workspace
81 /// members. In this case, `opts.all_features` must be `true`.
82 pub fn resolve_ws_with_opts
<'cfg
>(
84 target_data
: &RustcTargetData
<'cfg
>,
85 requested_targets
: &[CompileKind
],
86 cli_features
: &CliFeatures
,
87 specs
: &[PackageIdSpec
],
88 has_dev_units
: HasDevUnits
,
89 force_all_targets
: ForceAllTargets
,
90 ) -> CargoResult
<WorkspaceResolve
<'cfg
>> {
91 let mut registry
= PackageRegistry
::new(ws
.config())?
;
92 let mut add_patches
= true;
93 let resolve
= if ws
.ignore_lock() {
95 } else if ws
.require_optional_deps() {
96 // First, resolve the root_package's *listed* dependencies, as well as
97 // downloading and updating all remotes and such.
98 let resolve
= resolve_with_registry(ws
, &mut registry
)?
;
99 // No need to add patches again, `resolve_with_registry` has done it.
102 // Second, resolve with precisely what we're doing. Filter out
103 // transitive dependencies if necessary, specify features, handle
105 let _p
= profile
::start("resolving with overrides...");
107 add_overrides(&mut registry
, ws
)?
;
109 for &(ref replace_spec
, ref dep
) in ws
.root_replace() {
112 .any(|r
| replace_spec
.matches(r
) && !dep
.matches_id(r
))
116 .warn(format
!("package replacement is not used: {}", replace_spec
))?
119 if dep
.features().len() != 0 || !dep
.uses_default_features() {
123 "replacement for `{}` uses the features mechanism. \
124 default-features and features will not take effect because the replacement dependency does not support this mechanism",
132 ops
::load_pkg_lockfile(ws
)?
135 let resolved_with_overrides
= resolve_with_previous(
146 let pkg_set
= get_resolved_packages(&resolved_with_overrides
, registry
)?
;
149 .members_with_features(specs
, cli_features
)?
151 .map(|(p
, _fts
)| p
.package_id())
152 .collect
::<Vec
<_
>>();
153 pkg_set
.download_accessible(
154 &resolved_with_overrides
,
162 let feature_opts
= FeatureOpts
::new(ws
, has_dev_units
, force_all_targets
)?
;
163 let resolved_features
= FeatureResolver
::resolve(
166 &resolved_with_overrides
,
174 pkg_set
.warn_no_lib_packages_and_artifact_libs_overlapping_deps(
176 &resolved_with_overrides
,
184 Ok(WorkspaceResolve
{
186 workspace_resolve
: resolve
,
187 targeted_resolve
: resolved_with_overrides
,
192 fn resolve_with_registry
<'cfg
>(
193 ws
: &Workspace
<'cfg
>,
194 registry
: &mut PackageRegistry
<'cfg
>,
195 ) -> CargoResult
<Resolve
> {
196 let prev
= ops
::load_pkg_lockfile(ws
)?
;
197 let mut resolve
= resolve_with_previous(
200 &CliFeatures
::new_all(true),
208 if !ws
.is_ephemeral() && ws
.require_optional_deps() {
209 ops
::write_pkg_lockfile(ws
, &mut resolve
)?
;
214 /// Resolves all dependencies for a package using an optional previous instance
215 /// of resolve to guide the resolution process.
217 /// This also takes an optional hash set, `to_avoid`, which is a list of package
218 /// IDs that should be avoided when consulting the previous instance of resolve
219 /// (often used in pairings with updates).
221 /// The previous resolve normally comes from a lock file. This function does not
222 /// read or write lock files from the filesystem.
224 /// `specs` may be empty, which indicates it should resolve all workspace
225 /// members. In this case, `opts.all_features` must be `true`.
227 /// If `register_patches` is true, then entries from the `[patch]` table in
228 /// the manifest will be added to the given `PackageRegistry`.
229 pub fn resolve_with_previous
<'cfg
>(
230 registry
: &mut PackageRegistry
<'cfg
>,
231 ws
: &Workspace
<'cfg
>,
232 cli_features
: &CliFeatures
,
233 has_dev_units
: HasDevUnits
,
234 previous
: Option
<&Resolve
>,
235 to_avoid
: Option
<&HashSet
<PackageId
>>,
236 specs
: &[PackageIdSpec
],
237 register_patches
: bool
,
238 ) -> CargoResult
<Resolve
> {
239 // We only want one Cargo at a time resolving a crate graph since this can
240 // involve a lot of frobbing of the global caches.
241 let _lock
= ws
.config().acquire_package_cache_lock()?
;
243 // Here we place an artificial limitation that all non-registry sources
244 // cannot be locked at more than one revision. This means that if a Git
245 // repository provides more than one package, they must all be updated in
246 // step when any of them are updated.
248 // TODO: this seems like a hokey reason to single out the registry as being
250 let to_avoid_sources
: HashSet
<SourceId
> = to_avoid
253 .map(|p
| p
.source_id())
254 .filter(|s
| !s
.is_registry())
257 .unwrap_or_default();
259 let pre_patch_keep
= |p
: &PackageId
| {
260 !to_avoid_sources
.contains(&p
.source_id())
262 Some(set
) => !set
.contains(p
),
267 // While registering patches, we will record preferences for particular versions
268 // of various packages.
269 let mut version_prefs
= VersionPreferences
::default();
271 // This is a set of PackageIds of `[patch]` entries, and some related locked PackageIds, for
272 // which locking should be avoided (but which will be preferred when searching dependencies,
273 // via prefer_patch_deps below)
274 let mut avoid_patch_ids
= HashSet
::new();
276 if register_patches
{
277 for (url
, patches
) in ws
.root_patch()?
.iter() {
278 for patch
in patches
{
279 version_prefs
.prefer_dependency(patch
.clone());
281 let previous
= match previous
{
284 let patches
: Vec
<_
> = patches
.iter().map(|p
| (p
, None
)).collect();
285 let unlock_ids
= registry
.patch(url
, &patches
)?
;
286 // Since nothing is locked, this shouldn't possibly return anything.
287 assert
!(unlock_ids
.is_empty());
292 // This is a list of pairs where the first element of the pair is
293 // the raw `Dependency` which matches what's listed in `Cargo.toml`.
294 // The second element is, if present, the "locked" version of
295 // the `Dependency` as well as the `PackageId` that it previously
296 // resolved to. This second element is calculated by looking at the
297 // previous resolve graph, which is primarily what's done here to
298 // build the `registrations` list.
299 let mut registrations
= Vec
::new();
301 let candidates
= || {
304 .chain(previous
.unused_patches().iter().cloned())
305 .filter(&pre_patch_keep
)
308 let lock
= match candidates().find(|id
| dep
.matches_id(*id
)) {
309 // If we found an exactly matching candidate in our list of
310 // candidates, then that's the one to use.
311 Some(package_id
) => {
312 let mut locked_dep
= dep
.clone();
313 locked_dep
.lock_to(package_id
);
314 Some(LockedPatchDependency
{
315 dependency
: locked_dep
,
317 alt_package_id
: None
,
321 // If the candidate does not have a matching source id
322 // then we may still have a lock candidate. If we're
323 // loading a v2-encoded resolve graph and `dep` is a
324 // git dep with `branch = 'master'`, then this should
325 // also match candidates without `branch = 'master'`
326 // (which is now treated separately in Cargo).
328 // In this scenario we try to convert candidates located
329 // in the resolve graph to explicitly having the
330 // `master` branch (if they otherwise point to
331 // `DefaultBranch`). If this works and our `dep`
332 // matches that then this is something we'll lock to.
333 match candidates().find(|&id
| {
334 match master_branch_git_source(id
, previous
) {
335 Some(id
) => dep
.matches_id(id
),
339 Some(id_using_default
) => {
340 let id_using_master
= id_using_default
.with_source_id(
341 dep
.source_id().with_precise(
345 .map(|s
| s
.to_string()),
349 let mut locked_dep
= dep
.clone();
350 locked_dep
.lock_to(id_using_master
);
351 Some(LockedPatchDependency
{
352 dependency
: locked_dep
,
353 package_id
: id_using_master
,
354 // Note that this is where the magic
355 // happens, where the resolve graph
356 // probably has locks pointing to
357 // DefaultBranch sources, and by including
358 // this here those will get transparently
359 // rewritten to Branch("master") which we
360 // have a lock entry for.
361 alt_package_id
: Some(id_using_default
),
365 // No locked candidate was found
371 registrations
.push((dep
, lock
));
374 let canonical
= CanonicalUrl
::new(url
)?
;
375 for (orig_patch
, unlock_id
) in registry
.patch(url
, ®istrations
)?
{
376 // Avoid the locked patch ID.
377 avoid_patch_ids
.insert(unlock_id
);
378 // Also avoid the thing it is patching.
379 avoid_patch_ids
.extend(previous
.iter().filter(|id
| {
380 orig_patch
.matches_ignoring_source(*id
)
381 && *id
.source_id().canonical_url() == canonical
386 debug
!("avoid_patch_ids={:?}", avoid_patch_ids
);
388 let keep
= |p
: &PackageId
| pre_patch_keep(p
) && !avoid_patch_ids
.contains(p
);
390 let dev_deps
= ws
.require_optional_deps() || has_dev_units
== HasDevUnits
::Yes
;
391 // In the case where a previous instance of resolve is available, we
392 // want to lock as many packages as possible to the previous version
393 // without disturbing the graph structure.
394 if let Some(r
) = previous
{
395 trace
!("previous: {:?}", r
);
396 register_previous_locks(ws
, registry
, r
, &keep
, dev_deps
);
399 // Prefer to use anything in the previous lock file, aka we want to have conservative updates.
403 debug
!("attempting to prefer {}", id
);
404 version_prefs
.prefer_package_id(id
);
409 if register_patches
{
410 registry
.lock_patches();
413 for member
in ws
.members() {
414 registry
.add_sources(Some(member
.package_id().source_id()))?
;
417 let summaries
: Vec
<(Summary
, ResolveOpts
)> = ws
418 .members_with_features(specs
, cli_features
)?
420 .map(|(member
, features
)| {
421 let summary
= registry
.lock(member
.summary().clone());
426 features
: RequestedFeatures
::CliFeatures(features
),
432 let root_replace
= ws
.root_replace();
434 let replace
= match previous
{
435 Some(r
) => root_replace
437 .map(|&(ref spec
, ref dep
)| {
438 for (&key
, &val
) in r
.replacements().iter() {
439 if spec
.matches(key
) && dep
.matches_id(val
) && keep(&val
) {
440 let mut dep
= dep
.clone();
442 return (spec
.clone(), dep
);
445 (spec
.clone(), dep
.clone())
447 .collect
::<Vec
<_
>>(),
448 None
=> root_replace
.to_vec(),
451 ws
.preload(registry
);
452 let mut resolved
= resolver
::resolve(
458 ws
.unstable_features()
459 .require(Feature
::public_dependency())
462 let patches
: Vec
<_
> = registry
465 .flat_map(|v
| v
.iter().cloned())
467 resolved
.register_used_patches(&patches
[..]);
469 if register_patches
&& !resolved
.unused_patches().is_empty() {
470 emit_warnings_of_unused_patches(ws
, &resolved
, registry
)?
;
473 if let Some(previous
) = previous
{
474 resolved
.merge_from(previous
)?
;
479 /// Read the `paths` configuration variable to discover all path overrides that
480 /// have been configured.
481 pub fn add_overrides
<'a
>(
482 registry
: &mut PackageRegistry
<'a
>,
484 ) -> CargoResult
<()> {
485 let config
= ws
.config();
486 let paths
= match config
.get_list("paths")?
{
488 None
=> return Ok(()),
491 let paths
= paths
.val
.iter().map(|(s
, def
)| {
492 // The path listed next to the string is the config file in which the
493 // key was located, so we want to pop off the `.cargo/config` component
494 // to get the directory containing the `.cargo` folder.
495 (def
.root(config
).join(s
), def
)
498 for (path
, definition
) in paths
{
499 let id
= SourceId
::for_path(&path
)?
;
500 let mut source
= PathSource
::new_recursive(&path
, id
, ws
.config());
501 source
.update().with_context(|| {
503 "failed to update path override `{}` \
509 registry
.add_override(Box
::new(source
));
514 pub fn get_resolved_packages
<'cfg
>(
516 registry
: PackageRegistry
<'cfg
>,
517 ) -> CargoResult
<PackageSet
<'cfg
>> {
518 let ids
: Vec
<PackageId
> = resolve
.iter().collect();
522 /// In this function we're responsible for informing the `registry` of all
523 /// locked dependencies from the previous lock file we had, `resolve`.
525 /// This gets particularly tricky for a couple of reasons. The first is that we
526 /// want all updates to be conservative, so we actually want to take the
527 /// `resolve` into account (and avoid unnecessary registry updates and such).
528 /// The second, however, is that we want to be resilient to updates of
529 /// manifests. For example if a dependency is added or a version is changed we
530 /// want to make sure that we properly re-resolve (conservatively) instead of
531 /// providing an opaque error.
533 /// The logic here is somewhat subtle, but there should be more comments below to help out.
536 /// Note that this function, at the time of this writing, is basically the
537 /// entire fix for issue #4127.
538 fn register_previous_locks(
540 registry
: &mut PackageRegistry
<'_
>,
542 keep
: &dyn Fn(&PackageId
) -> bool
,
545 let path_pkg
= |id
: SourceId
| {
549 if let Ok(path
) = id
.url().to_file_path() {
550 if let Ok(pkg
) = ws
.load(&path
.join("Cargo.toml")) {
557 // Ok so we've been passed in a `keep` function which basically says "if I
558 // return `true` then this package wasn't listed for an update on the command
559 // line". That is, if we run `cargo update -p foo` then `keep(bar)` will return
560 // `true`, whereas `keep(foo)` will return `false` (roughly speaking).
562 // This isn't actually quite what we want, however. Instead we want to
563 // further refine this `keep` function with *all transitive dependencies* of
564 // the packages we're not keeping. For example, consider a case like this:
566 // * There's a crate `log`.
567 // * There's a crate `serde` which depends on `log`.
569 // Let's say we then run `cargo update -p serde`. This may *also* want to
570 // update the `log` dependency as our newer version of `serde` may have a
571 // new minimum version required for `log`. Now this isn't always guaranteed
572 // to work. What'll happen here is we *won't* lock the `log` dependency nor
573 // the `log` crate itself, but we will inform the registry "please prefer
574 // this version of `log`". That way if our newer version of serde works with
575 // the older version of `log`, we conservatively won't update `log`. If,
576 // however, nothing else in the dependency graph depends on `log` and the
577 // newer version of `serde` requires a new version of `log` it'll get pulled
578 // in (as we didn't accidentally lock it to an old version).
580 // Additionally, here we process all path dependencies listed in the previous
581 // resolve. They can not only have their dependencies change but also
582 // the versions of the package change as well. If this ends up happening
583 // then we want to make sure we don't lock a package ID node that doesn't
584 // actually exist. Note that we don't do transitive visits of all the
585 // package's dependencies here as that'll be covered below to poison those
587 let mut avoid_locking
= HashSet
::new();
588 registry
.add_to_yanked_whitelist(resolve
.iter().filter(keep
));
589 for node
in resolve
.iter() {
591 add_deps(resolve
, node
, &mut avoid_locking
);
592 } else if let Some(pkg
) = path_pkg(node
.source_id()) {
593 if pkg
.package_id() != node
{
594 avoid_locking
.insert(node
);
599 // Ok, but the above loop isn't the entire story! Updates to the dependency
600 // graph can come from two locations, the `cargo update` command or
601 // manifests themselves. For example a manifest on the filesystem may
602 // have been updated to have an updated version requirement on `serde`. In
603 // this case both `keep(serde)` and `keep(log)` return `true` (the `keep`
604 // that's an argument to this function). We, however, don't want to keep
605 // either of those! Otherwise we'll get obscure resolve errors about locked
608 // To solve this problem we iterate over all packages with path sources
609 // (aka ones with manifests that are changing) and take a look at all of
610 // their dependencies. If any dependency does not match something in the
611 // previous lock file, then we're guaranteed that the main resolver will
612 // update the source of this dependency no matter what. Knowing this we
613 // poison all packages from the same source, forcing them all to get
616 // This may seem like a heavy hammer, and it is! It means that if you change
617 // anything from crates.io then all of crates.io becomes unlocked. Note,
618 // however, that we still want conservative updates. This currently happens
619 // because the first candidate the resolver picks is the previously locked
620 // version, and only if that fails to activate do we move on and try
621 // a different version. (giving the guise of conservative updates)
623 // For example let's say we had `serde = "0.1"` written in our lock file.
624 // When we later edit this to `serde = "0.1.3"` we don't want to lock serde
625 // at its old version, 0.1.1. Instead we want to allow it to update to
626 // `0.1.3` and update its own dependencies (like above). To do this *all
627 // crates from crates.io* are not locked (aka added to `avoid_locking`).
628 // For dependencies like `log` their previous version in the lock file will
629 // come up first before newer versions, if newer versions are available.
630 let mut path_deps
= ws
.members().cloned().collect
::<Vec
<_
>>();
631 let mut visited
= HashSet
::new();
632 while let Some(member
) = path_deps
.pop() {
633 if !visited
.insert(member
.package_id()) {
636 let is_ws_member
= ws
.is_member(&member
);
637 for dep
in member
.dependencies() {
638 // If this dependency didn't match anything special then we may want
639 // to poison the source as it may have been added. If this path
640 // dependency is **not** a workspace member, however, and it's an
641 // optional/non-transitive dependency then it won't necessarily
642 // be in our lock file. If this shows up then we avoid poisoning
643 // this source as otherwise we'd repeatedly update the registry.
645 // TODO: this breaks adding an optional dependency in a
646 // non-workspace member and then simultaneously editing the
647 // dependency on that crate to enable the feature. For now,
648 // this bug is better than the always-updating registry though.
649 if !is_ws_member
&& (dep
.is_optional() || !dep
.is_transitive()) {
653 // If dev-dependencies aren't being resolved, skip them.
654 if !dep
.is_transitive() && !dev_deps
{
658 // If this is a path dependency, then try to push it onto our
660 if let Some(pkg
) = path_pkg(dep
.source_id()) {
665 // If we match *anything* in the dependency graph then we consider
666 // ourselves all ok, and assume that we'll resolve to that.
667 if resolve
.iter().any(|id
| dep
.matches_ignoring_source(id
)) {
671 // Ok if nothing matches, then we poison the source of these
672 // dependencies and the previous lock file.
674 "poisoning {} because {} looks like it changed {}",
681 .filter(|id
| id
.source_id() == dep
.source_id())
683 add_deps(resolve
, id
, &mut avoid_locking
);
688 // Alright now that we've got our new, fresh, shiny, and refined `keep`
689 // function let's put it to action. Take a look at the previous lock file,
690 // filter everything by this callback, and then shove everything else into
691 // the registry as a locked dependency.
692 let keep
= |id
: &PackageId
| keep(id
) && !avoid_locking
.contains(id
);
694 registry
.clear_lock();
695 for node
in resolve
.iter().filter(keep
) {
697 .deps_not_replaced(node
)
700 .collect
::<Vec
<_
>>();
702 // In the v2 lockfile format and prior the `branch=master` dependency
703 // directive was serialized the same way as the no-branch-listed
704 // directive. Nowadays in Cargo, however, these two directives are
705 // considered distinct and are no longer represented the same way. To
706 // maintain compatibility with older lock files we register locked nodes
707 // for *both* the master branch and the default branch.
709 // Note that this is only applicable for loading older resolves now at
710 // this point. All new lock files are encoded as v3-or-later, so this is
711 // just compat for loading an old lock file successfully.
712 if let Some(node
) = master_branch_git_source(node
, resolve
) {
713 registry
.register_lock(node
, deps
.clone());
716 registry
.register_lock(node
, deps
);
719 /// Recursively add `node` and all its transitive dependencies to `set`.
720 fn add_deps(resolve
: &Resolve
, node
: PackageId
, set
: &mut HashSet
<PackageId
>) {
721 if !set
.insert(node
) {
724 debug
!("ignoring any lock pointing directly at {}", node
);
725 for (dep
, _
) in resolve
.deps_not_replaced(node
) {
726 add_deps(resolve
, dep
, set
);
731 fn master_branch_git_source(id
: PackageId
, resolve
: &Resolve
) -> Option
<PackageId
> {
732 if resolve
.version() <= ResolveVersion
::V2
{
733 let source
= id
.source_id();
734 if let Some(GitReference
::DefaultBranch
) = source
.git_reference() {
736 SourceId
::for_git(source
.url(), GitReference
::Branch("master".to_string()))
738 .with_precise(source
.precise().map(|s
| s
.to_string()));
739 return Some(id
.with_source_id(new_source
));
745 /// Emits warnings of unused patches case by case.
747 /// This function does its best to provide more targeted and helpful messages
748 /// (such as showing close candidates that failed to match). However, that's
749 /// not terribly easy to do, so just show a general help message if we cannot.
750 fn emit_warnings_of_unused_patches(
753 registry
: &PackageRegistry
<'_
>,
754 ) -> CargoResult
<()> {
755 const MESSAGE
: &str = "was not used in the crate graph.";
757 // Patch packages with the source URLs being patched
758 let mut patch_pkgid_to_urls
= HashMap
::new();
759 for (url
, summaries
) in registry
.patches().iter() {
760 for summary
in summaries
.iter() {
762 .entry(summary
.package_id())
763 .or_insert_with(HashSet
::new
)
768 // pkg name -> all source IDs under the same pkg name
769 let mut source_ids_grouped_by_pkg_name
= HashMap
::new();
770 for pkgid
in resolve
.iter() {
771 source_ids_grouped_by_pkg_name
773 .or_insert_with(HashSet
::new
)
774 .insert(pkgid
.source_id());
777 let mut unemitted_unused_patches
= Vec
::new();
778 for unused
in resolve
.unused_patches().iter() {
779 // Show alternative source URLs if the source URLs being patched
780 // cannot be found in the crate graph.
782 source_ids_grouped_by_pkg_name
.get(&unused
.name()),
783 patch_pkgid_to_urls
.get(unused
),
785 (Some(ids
), Some(patched_urls
))
788 .all(|id
| !patched_urls
.contains(id
.canonical_url())) =>
791 let mut msg
= String
::new();
792 writeln
!(msg
, "Patch `{}` {}", unused
, MESSAGE
)?
;
795 "Perhaps you misspell the source URL being patched.\n\
796 Possible URLs for `[patch.<URL>]`:",
798 for id
in ids
.iter() {
799 write
!(msg
, "\n {}", id
.display_registry_name())?
;
801 ws
.config().shell().warn(msg
)?
;
803 _
=> unemitted_unused_patches
.push(unused
),
807 // Show general help message.
808 if !unemitted_unused_patches
.is_empty() {
809 let warnings
: Vec
<_
> = unemitted_unused_patches
811 .map(|pkgid
| format
!("Patch `{}` {}", pkgid
, MESSAGE
))
815 .warn(format
!("{}\n{}", warnings
.join("\n"), UNUSED_PATCH_WARNING
))?
;