]> git.proxmox.com Git - cargo.git/blob - src/cargo/ops/resolve.rs
3f96e2629345e9c91fff73ebd09d358faa1570f8
[cargo.git] / src / cargo / ops / resolve.rs
1 //! High-level APIs for executing the resolver.
2 //!
3 //! This module provides functions for running the resolver given a workspace.
4 //! There are roughly 3 main functions:
5 //!
6 //! - `resolve_ws`: A simple, high-level function with no options.
7 //! - `resolve_ws_with_opts`: A medium-level function with options like
8 //! user-provided features. This is the most appropriate function to use in
9 //! most cases.
10 //! - `resolve_with_previous`: A low-level function for running the resolver,
11 //! providing the most power and flexibility.
12
13 use crate::core::compiler::{CompileKind, RustcTargetData};
14 use crate::core::registry::{LockedPatchDependency, PackageRegistry};
15 use crate::core::resolver::features::{
16 CliFeatures, FeatureOpts, FeatureResolver, ForceAllTargets, RequestedFeatures, ResolvedFeatures,
17 };
18 use crate::core::resolver::{
19 self, HasDevUnits, Resolve, ResolveOpts, ResolveVersion, VersionPreferences,
20 };
21 use crate::core::summary::Summary;
22 use crate::core::Feature;
23 use crate::core::{
24 GitReference, PackageId, PackageIdSpec, PackageSet, Source, SourceId, Workspace,
25 };
26 use crate::ops;
27 use crate::sources::PathSource;
28 use crate::util::errors::CargoResult;
29 use crate::util::{profile, CanonicalUrl};
30 use anyhow::Context as _;
31 use log::{debug, trace};
32 use std::collections::{HashMap, HashSet};
33
/// Result for `resolve_ws_with_opts`.
///
/// Bundles the outcome of dependency resolution and feature resolution for a
/// workspace so callers can consume both together.
pub struct WorkspaceResolve<'cfg> {
    /// Packages to be downloaded.
    pub pkg_set: PackageSet<'cfg>,
    /// The resolve for the entire workspace.
    ///
    /// This may be `None` for things like `cargo install` and `-Zavoid-dev-deps`.
    /// This does not include `paths` overrides.
    pub workspace_resolve: Option<Resolve>,
    /// The narrowed resolve, with the specific features enabled, and only the
    /// given package specs requested.
    pub targeted_resolve: Resolve,
    /// The features activated per package.
    pub resolved_features: ResolvedFeatures,
}
49
/// General help text appended to "unused patch" warnings by
/// `emit_warnings_of_unused_patches` when no more targeted diagnosis applies.
const UNUSED_PATCH_WARNING: &str = "\
Check that the patched package version and available features are compatible
with the dependency requirements. If the patch has a different version from
what is locked in the Cargo.lock file, run `cargo update` to use the new
version. This may also occur with an optional dependency that is not enabled.";
55
56 /// Resolves all dependencies for the workspace using the previous
57 /// lock file as a guide if present.
58 ///
59 /// This function will also write the result of resolution as a new lock file
60 /// (unless it is an ephemeral workspace such as `cargo install` or `cargo
61 /// package`).
62 ///
63 /// This is a simple interface used by commands like `clean`, `fetch`, and
64 /// `package`, which don't specify any options or features.
65 pub fn resolve_ws<'a>(ws: &Workspace<'a>) -> CargoResult<(PackageSet<'a>, Resolve)> {
66 let mut registry = PackageRegistry::new(ws.config())?;
67 let resolve = resolve_with_registry(ws, &mut registry)?;
68 let packages = get_resolved_packages(&resolve, registry)?;
69 Ok((packages, resolve))
70 }
71
/// Resolves dependencies for some packages of the workspace,
/// taking into account `paths` overrides and activated features.
///
/// This function will also write the result of resolution as a new lock file
/// (unless `Workspace::require_optional_deps` is false, such as `cargo
/// install` or `-Z avoid-dev-deps`), or it is an ephemeral workspace (`cargo
/// install` or `cargo package`).
///
/// `specs` may be empty, which indicates it should resolve all workspace
/// members. In this case, `opts.all_features` must be `true`.
pub fn resolve_ws_with_opts<'cfg>(
    ws: &Workspace<'cfg>,
    target_data: &RustcTargetData<'cfg>,
    requested_targets: &[CompileKind],
    cli_features: &CliFeatures,
    specs: &[PackageIdSpec],
    has_dev_units: HasDevUnits,
    force_all_targets: ForceAllTargets,
) -> CargoResult<WorkspaceResolve<'cfg>> {
    let mut registry = PackageRegistry::new(ws.config())?;
    let mut add_patches = true;
    // Compute the workspace-wide resolve (or load it from the lock file); may
    // be `None` when the lock file is ignored or absent.
    let resolve = if ws.ignore_lock() {
        None
    } else if ws.require_optional_deps() {
        // First, resolve the root_package's *listed* dependencies, as well as
        // downloading and updating all remotes and such.
        let resolve = resolve_with_registry(ws, &mut registry)?;
        // No need to add patches again, `resolve_with_registry` has done it.
        add_patches = false;

        // Second, resolve with precisely what we're doing. Filter out
        // transitive dependencies if necessary, specify features, handle
        // overrides, etc.
        let _p = profile::start("resolving with overrides...");

        add_overrides(&mut registry, ws)?;

        // Warn about `[replace]` entries that either go unused or try to use
        // the (unsupported) features mechanism.
        for &(ref replace_spec, ref dep) in ws.root_replace() {
            if !resolve
                .iter()
                .any(|r| replace_spec.matches(r) && !dep.matches_id(r))
            {
                ws.config()
                    .shell()
                    .warn(format!("package replacement is not used: {}", replace_spec))?
            }

            if dep.features().len() != 0 || !dep.uses_default_features() {
                ws.config()
                    .shell()
                    .warn(format!(
                        "replacement for `{}` uses the features mechanism. \
                         default-features and features will not take effect because the replacement dependency does not support this mechanism",
                        dep.package_name()
                    ))?
            }
        }

        Some(resolve)
    } else {
        ops::load_pkg_lockfile(ws)?
    };

    // Narrow the resolve down to the requested specs/features, using the
    // workspace resolve above (if any) as the lock to stay conservative.
    let resolved_with_overrides = resolve_with_previous(
        &mut registry,
        ws,
        cli_features,
        has_dev_units,
        resolve.as_ref(),
        None,
        specs,
        add_patches,
    )?;

    let pkg_set = get_resolved_packages(&resolved_with_overrides, registry)?;

    let member_ids = ws
        .members_with_features(specs, cli_features)?
        .into_iter()
        .map(|(p, _fts)| p.package_id())
        .collect::<Vec<_>>();
    // Eagerly download everything reachable from the requested members for
    // the requested targets.
    pkg_set.download_accessible(
        &resolved_with_overrides,
        &member_ids,
        has_dev_units,
        requested_targets,
        target_data,
        force_all_targets,
    )?;

    // With the dependency graph fixed, run feature resolution on top of it.
    let feature_opts = FeatureOpts::new(ws, has_dev_units, force_all_targets)?;
    let resolved_features = FeatureResolver::resolve(
        ws,
        target_data,
        &resolved_with_overrides,
        &pkg_set,
        cli_features,
        specs,
        requested_targets,
        feature_opts,
    )?;

    pkg_set.warn_no_lib_packages_and_artifact_libs_overlapping_deps(
        ws,
        &resolved_with_overrides,
        &member_ids,
        has_dev_units,
        requested_targets,
        target_data,
        force_all_targets,
    )?;

    Ok(WorkspaceResolve {
        pkg_set,
        workspace_resolve: resolve,
        targeted_resolve: resolved_with_overrides,
        resolved_features,
    })
}
191
192 fn resolve_with_registry<'cfg>(
193 ws: &Workspace<'cfg>,
194 registry: &mut PackageRegistry<'cfg>,
195 ) -> CargoResult<Resolve> {
196 let prev = ops::load_pkg_lockfile(ws)?;
197 let mut resolve = resolve_with_previous(
198 registry,
199 ws,
200 &CliFeatures::new_all(true),
201 HasDevUnits::Yes,
202 prev.as_ref(),
203 None,
204 &[],
205 true,
206 )?;
207
208 if !ws.is_ephemeral() && ws.require_optional_deps() {
209 ops::write_pkg_lockfile(ws, &mut resolve)?;
210 }
211 Ok(resolve)
212 }
213
/// Resolves all dependencies for a package using an optional previous instance
/// of resolve to guide the resolution process.
///
/// This also takes an optional hash set, `to_avoid`, which is a list of package
/// IDs that should be avoided when consulting the previous instance of resolve
/// (often used in pairings with updates).
///
/// The previous resolve normally comes from a lock file. This function does not
/// read or write lock files from the filesystem.
///
/// `specs` may be empty, which indicates it should resolve all workspace
/// members. In this case, `opts.all_features` must be `true`.
///
/// If `register_patches` is true, then entries from the `[patch]` table in
/// the manifest will be added to the given `PackageRegistry`.
pub fn resolve_with_previous<'cfg>(
    registry: &mut PackageRegistry<'cfg>,
    ws: &Workspace<'cfg>,
    cli_features: &CliFeatures,
    has_dev_units: HasDevUnits,
    previous: Option<&Resolve>,
    to_avoid: Option<&HashSet<PackageId>>,
    specs: &[PackageIdSpec],
    register_patches: bool,
) -> CargoResult<Resolve> {
    // We only want one Cargo at a time resolving a crate graph since this can
    // involve a lot of frobbing of the global caches.
    let _lock = ws.config().acquire_package_cache_lock()?;

    // Here we place an artificial limitation that all non-registry sources
    // cannot be locked at more than one revision. This means that if a Git
    // repository provides more than one package, they must all be updated in
    // step when any of them are updated.
    //
    // TODO: this seems like a hokey reason to single out the registry as being
    // different.
    let to_avoid_sources: HashSet<SourceId> = to_avoid
        .map(|set| {
            set.iter()
                .map(|p| p.source_id())
                .filter(|s| !s.is_registry())
                .collect()
        })
        .unwrap_or_default();

    // Keep a previously-locked id only if neither the id itself nor its whole
    // source is slated to be avoided.
    let pre_patch_keep = |p: &PackageId| {
        !to_avoid_sources.contains(&p.source_id())
            && match to_avoid {
                Some(set) => !set.contains(p),
                None => true,
            }
    };

    // While registering patches, we will record preferences for particular versions
    // of various packages.
    let mut version_prefs = VersionPreferences::default();

    // This is a set of PackageIds of `[patch]` entries, and some related locked PackageIds, for
    // which locking should be avoided (but which will be preferred when searching dependencies,
    // via prefer_patch_deps below)
    let mut avoid_patch_ids = HashSet::new();

    if register_patches {
        for (url, patches) in ws.root_patch()?.iter() {
            for patch in patches {
                version_prefs.prefer_dependency(patch.clone());
            }
            let previous = match previous {
                Some(r) => r,
                None => {
                    // No previous resolve: register the patches unlocked.
                    let patches: Vec<_> = patches.iter().map(|p| (p, None)).collect();
                    let unlock_ids = registry.patch(url, &patches)?;
                    // Since nothing is locked, this shouldn't possibly return anything.
                    assert!(unlock_ids.is_empty());
                    continue;
                }
            };

            // This is a list of pairs where the first element of the pair is
            // the raw `Dependency` which matches what's listed in `Cargo.toml`.
            // The second element is, if present, the "locked" version of
            // the `Dependency` as well as the `PackageId` that it previously
            // resolved to. This second element is calculated by looking at the
            // previous resolve graph, which is primarily what's done here to
            // build the `registrations` list.
            let mut registrations = Vec::new();
            for dep in patches {
                let candidates = || {
                    previous
                        .iter()
                        .chain(previous.unused_patches().iter().cloned())
                        .filter(&pre_patch_keep)
                };

                let lock = match candidates().find(|id| dep.matches_id(*id)) {
                    // If we found an exactly matching candidate in our list of
                    // candidates, then that's the one to use.
                    Some(package_id) => {
                        let mut locked_dep = dep.clone();
                        locked_dep.lock_to(package_id);
                        Some(LockedPatchDependency {
                            dependency: locked_dep,
                            package_id,
                            alt_package_id: None,
                        })
                    }
                    None => {
                        // If the candidate does not have a matching source id
                        // then we may still have a lock candidate. If we're
                        // loading a v2-encoded resolve graph and `dep` is a
                        // git dep with `branch = 'master'`, then this should
                        // also match candidates without `branch = 'master'`
                        // (which is now treated separately in Cargo).
                        //
                        // In this scenario we try to convert candidates located
                        // in the resolve graph to explicitly having the
                        // `master` branch (if they otherwise point to
                        // `DefaultBranch`). If this works and our `dep`
                        // matches that then this is something we'll lock to.
                        match candidates().find(|&id| {
                            match master_branch_git_source(id, previous) {
                                Some(id) => dep.matches_id(id),
                                None => false,
                            }
                        }) {
                            Some(id_using_default) => {
                                let id_using_master = id_using_default.with_source_id(
                                    dep.source_id().with_precise(
                                        id_using_default
                                            .source_id()
                                            .precise()
                                            .map(|s| s.to_string()),
                                    ),
                                );

                                let mut locked_dep = dep.clone();
                                locked_dep.lock_to(id_using_master);
                                Some(LockedPatchDependency {
                                    dependency: locked_dep,
                                    package_id: id_using_master,
                                    // Note that this is where the magic
                                    // happens, where the resolve graph
                                    // probably has locks pointing to
                                    // DefaultBranch sources, and by including
                                    // this here those will get transparently
                                    // rewritten to Branch("master") which we
                                    // have a lock entry for.
                                    alt_package_id: Some(id_using_default),
                                })
                            }

                            // No locked candidate was found
                            None => None,
                        }
                    }
                };

                registrations.push((dep, lock));
            }

            let canonical = CanonicalUrl::new(url)?;
            for (orig_patch, unlock_id) in registry.patch(url, &registrations)? {
                // Avoid the locked patch ID.
                avoid_patch_ids.insert(unlock_id);
                // Also avoid the thing it is patching.
                avoid_patch_ids.extend(previous.iter().filter(|id| {
                    orig_patch.matches_ignoring_source(*id)
                        && *id.source_id().canonical_url() == canonical
                }));
            }
        }
    }
    debug!("avoid_patch_ids={:?}", avoid_patch_ids);

    // Refine the keep predicate with the patch-derived avoid set.
    let keep = |p: &PackageId| pre_patch_keep(p) && !avoid_patch_ids.contains(p);

    let dev_deps = ws.require_optional_deps() || has_dev_units == HasDevUnits::Yes;
    // In the case where a previous instance of resolve is available, we
    // want to lock as many packages as possible to the previous version
    // without disturbing the graph structure.
    if let Some(r) = previous {
        trace!("previous: {:?}", r);
        register_previous_locks(ws, registry, r, &keep, dev_deps);
    }

    // Prefer to use anything in the previous lock file, aka we want to have conservative updates.
    for r in previous {
        for id in r.iter() {
            if keep(&id) {
                debug!("attempting to prefer {}", id);
                version_prefs.prefer_package_id(id);
            }
        }
    }

    if register_patches {
        registry.lock_patches();
    }

    for member in ws.members() {
        registry.add_sources(Some(member.package_id().source_id()))?;
    }

    // One (summary, opts) pair per requested workspace member; these are the
    // roots of the resolve.
    let summaries: Vec<(Summary, ResolveOpts)> = ws
        .members_with_features(specs, cli_features)?
        .into_iter()
        .map(|(member, features)| {
            let summary = registry.lock(member.summary().clone());
            (
                summary,
                ResolveOpts {
                    dev_deps,
                    features: RequestedFeatures::CliFeatures(features),
                },
            )
        })
        .collect();

    let root_replace = ws.root_replace();

    // Lock each `[replace]` entry to its previously-resolved id when possible.
    let replace = match previous {
        Some(r) => root_replace
            .iter()
            .map(|&(ref spec, ref dep)| {
                for (&key, &val) in r.replacements().iter() {
                    if spec.matches(key) && dep.matches_id(val) && keep(&val) {
                        let mut dep = dep.clone();
                        dep.lock_to(val);
                        return (spec.clone(), dep);
                    }
                }
                (spec.clone(), dep.clone())
            })
            .collect::<Vec<_>>(),
        None => root_replace.to_vec(),
    };

    ws.preload(registry);
    let mut resolved = resolver::resolve(
        &summaries,
        &replace,
        registry,
        &version_prefs,
        Some(ws.config()),
        ws.unstable_features()
            .require(Feature::public_dependency())
            .is_ok(),
    )?;
    // Record which `[patch]` entries actually participated in the graph.
    let patches: Vec<_> = registry
        .patches()
        .values()
        .flat_map(|v| v.iter().cloned())
        .collect();
    resolved.register_used_patches(&patches[..]);

    if register_patches && !resolved.unused_patches().is_empty() {
        emit_warnings_of_unused_patches(ws, &resolved, registry)?;
    }

    if let Some(previous) = previous {
        resolved.merge_from(previous)?;
    }
    Ok(resolved)
}
478
479 /// Read the `paths` configuration variable to discover all path overrides that
480 /// have been configured.
481 pub fn add_overrides<'a>(
482 registry: &mut PackageRegistry<'a>,
483 ws: &Workspace<'a>,
484 ) -> CargoResult<()> {
485 let config = ws.config();
486 let paths = match config.get_list("paths")? {
487 Some(list) => list,
488 None => return Ok(()),
489 };
490
491 let paths = paths.val.iter().map(|(s, def)| {
492 // The path listed next to the string is the config file in which the
493 // key was located, so we want to pop off the `.cargo/config` component
494 // to get the directory containing the `.cargo` folder.
495 (def.root(config).join(s), def)
496 });
497
498 for (path, definition) in paths {
499 let id = SourceId::for_path(&path)?;
500 let mut source = PathSource::new_recursive(&path, id, ws.config());
501 source.update().with_context(|| {
502 format!(
503 "failed to update path override `{}` \
504 (defined in `{}`)",
505 path.display(),
506 definition
507 )
508 })?;
509 registry.add_override(Box::new(source));
510 }
511 Ok(())
512 }
513
514 pub fn get_resolved_packages<'cfg>(
515 resolve: &Resolve,
516 registry: PackageRegistry<'cfg>,
517 ) -> CargoResult<PackageSet<'cfg>> {
518 let ids: Vec<PackageId> = resolve.iter().collect();
519 registry.get(&ids)
520 }
521
/// In this function we're responsible for informing the `registry` of all
/// locked dependencies from the previous lock file we had, `resolve`.
///
/// This gets particularly tricky for a couple of reasons. The first is that we
/// want all updates to be conservative, so we actually want to take the
/// `resolve` into account (and avoid unnecessary registry updates and such).
/// the second, however, is that we want to be resilient to updates of
/// manifests. For example if a dependency is added or a version is changed we
/// want to make sure that we properly re-resolve (conservatively) instead of
/// providing an opaque error.
///
/// The logic here is somewhat subtle, but there should be more comments below to
/// clarify things.
///
/// Note that this function, at the time of this writing, is basically the
/// entire fix for issue #4127.
fn register_previous_locks(
    ws: &Workspace<'_>,
    registry: &mut PackageRegistry<'_>,
    resolve: &Resolve,
    keep: &dyn Fn(&PackageId) -> bool,
    dev_deps: bool,
) {
    // Attempt to load the workspace package living at a `path` source id;
    // returns `None` for non-path sources or unloadable manifests.
    let path_pkg = |id: SourceId| {
        if !id.is_path() {
            return None;
        }
        if let Ok(path) = id.url().to_file_path() {
            if let Ok(pkg) = ws.load(&path.join("Cargo.toml")) {
                return Some(pkg);
            }
        }
        None
    };

    // Ok so we've been passed in a `keep` function which basically says "if I
    // return `true` then this package wasn't listed for an update on the command
    // line". That is, if we run `cargo update -p foo` then `keep(bar)` will return
    // `true`, whereas `keep(foo)` will return `false` (roughly speaking).
    //
    // This isn't actually quite what we want, however. Instead we want to
    // further refine this `keep` function with *all transitive dependencies* of
    // the packages we're not keeping. For example, consider a case like this:
    //
    // * There's a crate `log`.
    // * There's a crate `serde` which depends on `log`.
    //
    // Let's say we then run `cargo update -p serde`. This may *also* want to
    // update the `log` dependency as our newer version of `serde` may have a
    // new minimum version required for `log`. Now this isn't always guaranteed
    // to work. What'll happen here is we *won't* lock the `log` dependency nor
    // the `log` crate itself, but we will inform the registry "please prefer
    // this version of `log`". That way if our newer version of serde works with
    // the older version of `log`, we conservatively won't update `log`. If,
    // however, nothing else in the dependency graph depends on `log` and the
    // newer version of `serde` requires a new version of `log` it'll get pulled
    // in (as we didn't accidentally lock it to an old version).
    //
    // Additionally, here we process all path dependencies listed in the previous
    // resolve. They can not only have their dependencies change but also
    // the versions of the package change as well. If this ends up happening
    // then we want to make sure we don't lock a package ID node that doesn't
    // actually exist. Note that we don't do transitive visits of all the
    // package's dependencies here as that'll be covered below to poison those
    // if they changed.
    let mut avoid_locking = HashSet::new();
    registry.add_to_yanked_whitelist(resolve.iter().filter(keep));
    for node in resolve.iter() {
        if !keep(&node) {
            add_deps(resolve, node, &mut avoid_locking);
        } else if let Some(pkg) = path_pkg(node.source_id()) {
            if pkg.package_id() != node {
                avoid_locking.insert(node);
            }
        }
    }

    // Ok, but the above loop isn't the entire story! Updates to the dependency
    // graph can come from two locations, the `cargo update` command or
    // manifests themselves. For example a manifest on the filesystem may
    // have been updated to have an updated version requirement on `serde`. In
    // this case both `keep(serde)` and `keep(log)` return `true` (the `keep`
    // that's an argument to this function). We, however, don't want to keep
    // either of those! Otherwise we'll get obscure resolve errors about locked
    // versions.
    //
    // To solve this problem we iterate over all packages with path sources
    // (aka ones with manifests that are changing) and take a look at all of
    // their dependencies. If any dependency does not match something in the
    // previous lock file, then we're guaranteed that the main resolver will
    // update the source of this dependency no matter what. Knowing this we
    // poison all packages from the same source, forcing them all to get
    // updated.
    //
    // This may seem like a heavy hammer, and it is! It means that if you change
    // anything from crates.io then all of crates.io becomes unlocked. Note,
    // however, that we still want conservative updates. This currently happens
    // because the first candidate the resolver picks is the previously locked
    // version, and only if that fails to activate do we move on and try
    // a different version. (giving the guise of conservative updates)
    //
    // For example let's say we had `serde = "0.1"` written in our lock file.
    // When we later edit this to `serde = "0.1.3"` we don't want to lock serde
    // at its old version, 0.1.1. Instead we want to allow it to update to
    // `0.1.3` and update its own dependencies (like above). To do this *all
    // crates from crates.io* are not locked (aka added to `avoid_locking`).
    // For dependencies like `log` their previous version in the lock file will
    // come up first before newer versions, if newer versions are available.
    let mut path_deps = ws.members().cloned().collect::<Vec<_>>();
    let mut visited = HashSet::new();
    while let Some(member) = path_deps.pop() {
        if !visited.insert(member.package_id()) {
            continue;
        }
        let is_ws_member = ws.is_member(&member);
        for dep in member.dependencies() {
            // If this dependency didn't match anything special then we may want
            // to poison the source as it may have been added. If this path
            // dependencies is **not** a workspace member, however, and it's an
            // optional/non-transitive dependency then it won't be necessarily
            // be in our lock file. If this shows up then we avoid poisoning
            // this source as otherwise we'd repeatedly update the registry.
            //
            // TODO: this breaks adding an optional dependency in a
            // non-workspace member and then simultaneously editing the
            // dependency on that crate to enable the feature. For now,
            // this bug is better than the always-updating registry though.
            if !is_ws_member && (dep.is_optional() || !dep.is_transitive()) {
                continue;
            }

            // If dev-dependencies aren't being resolved, skip them.
            if !dep.is_transitive() && !dev_deps {
                continue;
            }

            // If this is a path dependency, then try to push it onto our
            // worklist.
            if let Some(pkg) = path_pkg(dep.source_id()) {
                path_deps.push(pkg);
                continue;
            }

            // If we match *anything* in the dependency graph then we consider
            // ourselves all ok, and assume that we'll resolve to that.
            if resolve.iter().any(|id| dep.matches_ignoring_source(id)) {
                continue;
            }

            // Ok if nothing matches, then we poison the source of these
            // dependencies and the previous lock file.
            debug!(
                "poisoning {} because {} looks like it changed {}",
                dep.source_id(),
                member.package_id(),
                dep.package_name()
            );
            for id in resolve
                .iter()
                .filter(|id| id.source_id() == dep.source_id())
            {
                add_deps(resolve, id, &mut avoid_locking);
            }
        }
    }

    // Alright now that we've got our new, fresh, shiny, and refined `keep`
    // function let's put it to action. Take a look at the previous lock file,
    // filter everything by this callback, and then shove everything else into
    // the registry as a locked dependency.
    let keep = |id: &PackageId| keep(id) && !avoid_locking.contains(id);

    registry.clear_lock();
    for node in resolve.iter().filter(keep) {
        let deps = resolve
            .deps_not_replaced(node)
            .map(|p| p.0)
            .filter(keep)
            .collect::<Vec<_>>();

        // In the v2 lockfile format and prior the `branch=master` dependency
        // directive was serialized the same way as the no-branch-listed
        // directive. Nowadays in Cargo, however, these two directives are
        // considered distinct and are no longer represented the same way. To
        // maintain compatibility with older lock files we register locked nodes
        // for *both* the master branch and the default branch.
        //
        // Note that this is only applicable for loading older resolves now at
        // this point. All new lock files are encoded as v3-or-later, so this is
        // just compat for loading an old lock file successfully.
        if let Some(node) = master_branch_git_source(node, resolve) {
            registry.register_lock(node, deps.clone());
        }

        registry.register_lock(node, deps);
    }

    /// Recursively add `node` and all its transitive dependencies to `set`.
    fn add_deps(resolve: &Resolve, node: PackageId, set: &mut HashSet<PackageId>) {
        if !set.insert(node) {
            return;
        }
        debug!("ignoring any lock pointing directly at {}", node);
        for (dep, _) in resolve.deps_not_replaced(node) {
            add_deps(resolve, dep, set);
        }
    }
}
730
731 fn master_branch_git_source(id: PackageId, resolve: &Resolve) -> Option<PackageId> {
732 if resolve.version() <= ResolveVersion::V2 {
733 let source = id.source_id();
734 if let Some(GitReference::DefaultBranch) = source.git_reference() {
735 let new_source =
736 SourceId::for_git(source.url(), GitReference::Branch("master".to_string()))
737 .unwrap()
738 .with_precise(source.precise().map(|s| s.to_string()));
739 return Some(id.with_source_id(new_source));
740 }
741 }
742 None
743 }
744
745 /// Emits warnings of unused patches case by case.
746 ///
747 /// This function does its best to provide more targeted and helpful
748 /// (such as showing close candidates that failed to match). However, that's
749 /// not terribly easy to do, so just show a general help message if we cannot.
750 fn emit_warnings_of_unused_patches(
751 ws: &Workspace<'_>,
752 resolve: &Resolve,
753 registry: &PackageRegistry<'_>,
754 ) -> CargoResult<()> {
755 const MESSAGE: &str = "was not used in the crate graph.";
756
757 // Patch package with the source URLs being patch
758 let mut patch_pkgid_to_urls = HashMap::new();
759 for (url, summaries) in registry.patches().iter() {
760 for summary in summaries.iter() {
761 patch_pkgid_to_urls
762 .entry(summary.package_id())
763 .or_insert_with(HashSet::new)
764 .insert(url);
765 }
766 }
767
768 // pkg name -> all source IDs of under the same pkg name
769 let mut source_ids_grouped_by_pkg_name = HashMap::new();
770 for pkgid in resolve.iter() {
771 source_ids_grouped_by_pkg_name
772 .entry(pkgid.name())
773 .or_insert_with(HashSet::new)
774 .insert(pkgid.source_id());
775 }
776
777 let mut unemitted_unused_patches = Vec::new();
778 for unused in resolve.unused_patches().iter() {
779 // Show alternative source URLs if the source URLs being patch
780 // cannot not be found in the crate graph.
781 match (
782 source_ids_grouped_by_pkg_name.get(&unused.name()),
783 patch_pkgid_to_urls.get(unused),
784 ) {
785 (Some(ids), Some(patched_urls))
786 if ids
787 .iter()
788 .all(|id| !patched_urls.contains(id.canonical_url())) =>
789 {
790 use std::fmt::Write;
791 let mut msg = String::new();
792 writeln!(msg, "Patch `{}` {}", unused, MESSAGE)?;
793 write!(
794 msg,
795 "Perhaps you misspell the source URL being patched.\n\
796 Possible URLs for `[patch.<URL>]`:",
797 )?;
798 for id in ids.iter() {
799 write!(msg, "\n {}", id.display_registry_name())?;
800 }
801 ws.config().shell().warn(msg)?;
802 }
803 _ => unemitted_unused_patches.push(unused),
804 }
805 }
806
807 // Show general help message.
808 if !unemitted_unused_patches.is_empty() {
809 let warnings: Vec<_> = unemitted_unused_patches
810 .iter()
811 .map(|pkgid| format!("Patch `{}` {}", pkgid, MESSAGE))
812 .collect();
813 ws.config()
814 .shell()
815 .warn(format!("{}\n{}", warnings.join("\n"), UNUSED_PATCH_WARNING))?;
816 }
817
818 return Ok(());
819 }