git.proxmox.com Git - cargo.git / blob - src/cargo/core/compiler/unit_dependencies.rs
Decouple build_unit_dependencies and Context.
//! Constructs the dependency graph for compilation.
//!
//! Rust code is typically organized as a set of Cargo packages. The
//! dependencies between the packages themselves are stored in the
//! `Resolve` struct. However, we can't use that information as-is for
//! compilation! A package typically contains several targets, or crates,
//! and these targets have inter-dependencies. For example, you need to
//! compile the `lib` target before the `bin` one, and you need to compile
//! `build.rs` before either of those.
//!
//! So, we need to lower the `Resolve`, which specifies dependencies between
//! *packages*, to a graph of dependencies between their *targets*, and this
//! is exactly what this module is doing! Well, almost exactly: another
//! complication is that we might want to compile the same target several times
//! (for example, with and without tests), so we actually build a dependency
//! graph of `Unit`s, which capture these properties.
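//!
//! As an illustrative sketch only (the package name is hypothetical, and real
//! `Unit`s also carry profile, `Kind`, and feature information), a package
//! `foo` with a library, a binary, and a `build.rs` lowers under a plain
//! `cargo build` to a unit graph roughly like:
//!
//! ```text
//! foo bin (Build)               -> foo lib (Build)
//! foo bin (Build)               -> foo build.rs (RunCustomBuild)
//! foo lib (Build)               -> foo build.rs (RunCustomBuild)
//! foo build.rs (RunCustomBuild) -> foo build.rs (Build, compiled for the host)
//! ```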

use crate::core::compiler::Unit;
use crate::core::compiler::{BuildContext, CompileMode, Kind};
use crate::core::dependency::Kind as DepKind;
use crate::core::package::Downloads;
use crate::core::profiles::UnitFor;
use crate::core::{Package, PackageId, Target};
use crate::CargoResult;
use log::trace;
use std::collections::{HashMap, HashSet};

/// State shared while constructing the unit dependency graph.
struct State<'a, 'cfg> {
    bcx: &'a BuildContext<'a, 'cfg>,
    /// Packages whose download has been started but has not yet finished.
    waiting_on_download: HashSet<PackageId>,
    downloads: Downloads<'a, 'cfg>,
    /// The dependency graph being built: each unit maps to its direct dependencies.
    unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
    package_cache: HashMap<PackageId, &'a Package>,
}

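/// Constructs the map from each root `Unit` (and, transitively, every unit it
/// needs) to the list of units it directly depends on.
///
/// A minimal usage sketch, assuming a `bcx: BuildContext` and the root units
/// chosen on the command line are already in hand (the real call site lives in
/// the compiler driver and is not shown here):
///
/// ```ignore
/// let deps = build_unit_dependencies(&bcx, &roots)?;
/// // Every dependency of a root must be built before the root itself.
/// for dep in &deps[&roots[0]] {
///     // schedule `dep` ...
/// }
/// ```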
pub fn build_unit_dependencies<'a, 'cfg>(
    bcx: &'a BuildContext<'a, 'cfg>,
    roots: &[Unit<'a>],
) -> CargoResult<HashMap<Unit<'a>, Vec<Unit<'a>>>> {
    let mut state = State {
        bcx,
        downloads: bcx.packages.enable_download()?,
        waiting_on_download: HashSet::new(),
        unit_dependencies: HashMap::new(),
        package_cache: HashMap::new(),
    };

    loop {
        for unit in roots.iter() {
            state.get(unit.pkg.package_id())?;

            // Dependencies of tests/benches should not have `panic` set.
            // We check the global test mode to see if we are running in `cargo
            // test`, in which case we ensure all dependencies have `panic`
            // cleared, and avoid building the lib thrice (once with `panic`, once
            // without, once for `--test`). In particular, the libs included for
            // doc tests and examples are in `Build` mode here.
            let unit_for = if unit.mode.is_any_test() || state.bcx.build_config.test() {
                UnitFor::new_test()
            } else if unit.target.is_custom_build() {
                // This normally doesn't happen, except `clean` aggressively
                // generates all units.
                UnitFor::new_build()
            } else if unit.target.for_host() {
                // Proc macros / plugins should never have `panic` set.
                UnitFor::new_compiler()
            } else {
                UnitFor::new_normal()
            };
            deps_of(unit, &mut state, unit_for)?;
        }

        if !state.waiting_on_download.is_empty() {
            state.finish_some_downloads()?;
            state.unit_dependencies.clear();
        } else {
            break;
        }
    }

    connect_run_custom_build_deps(&mut state);

    trace!("ALL UNIT DEPENDENCIES {:#?}", state.unit_dependencies);

    // Dependencies are used in tons of places throughout the backend, many of
    // which affect the determinism of the build itself. As a result, be sure
    // that dependency lists are always sorted so that we always get
    // deterministic output.
    for list in state.unit_dependencies.values_mut() {
        list.sort();
    }

    Ok(state.unit_dependencies)
}

fn deps_of<'a, 'cfg>(
    unit: &Unit<'a>,
    state: &mut State<'a, 'cfg>,
    unit_for: UnitFor,
) -> CargoResult<()> {
    // Currently the `unit_dependencies` map does not include `unit_for`. This should
    // be safe for now. `TestDependency` only exists to clear the `panic`
    // flag, and you'll never ask for a `unit` with `panic` set as a
    // `TestDependency`. `CustomBuild` should also be fine since if the
    // requested unit's settings are the same as `Any`, `CustomBuild` can't
    // affect anything else in the hierarchy.
    if !state.unit_dependencies.contains_key(unit) {
        let unit_deps = compute_deps(unit, state, unit_for)?;
        let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect();
        state.unit_dependencies.insert(*unit, to_insert);
        for (unit, unit_for) in unit_deps {
            deps_of(&unit, state, unit_for)?;
        }
    }
    Ok(())
}

/// Returns the dependencies of `unit`, as a `Vec` of `(Unit, UnitFor)` pairs.
/// The `UnitFor` is the profile type that should be used for dependencies of
/// that unit.
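///
/// As an illustration (hypothetical package `foo`; the exact contents depend
/// on features, platform, and mode), a `foo` binary unit might yield pairs
/// such as `(foo lib Build, UnitFor::new_normal())` and
/// `(foo build.rs RunCustomBuild, UnitFor::new_build())`.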
fn compute_deps<'a, 'cfg>(
    unit: &Unit<'a>,
    state: &mut State<'a, 'cfg>,
    unit_for: UnitFor,
) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
    if unit.mode.is_run_custom_build() {
        return compute_deps_custom_build(unit, state.bcx);
    } else if unit.mode.is_doc() {
        // Note: this does not include doc tests.
        return compute_deps_doc(unit, state);
    }

    let bcx = state.bcx;
    let id = unit.pkg.package_id();
    let deps = bcx.resolve.deps(id).filter(|&(_id, deps)| {
        assert!(!deps.is_empty());
        deps.iter().any(|dep| {
            // If this target is a build command, then we only want build
            // dependencies, otherwise we want everything *other than* build
            // dependencies.
            if unit.target.is_custom_build() != dep.is_build() {
                return false;
            }

            // If this dependency is **not** a transitive dependency, then it
            // only applies to test/example targets.
            if !dep.is_transitive()
                && !unit.target.is_test()
                && !unit.target.is_example()
                && !unit.mode.is_any_test()
            {
                return false;
            }

            // If this dependency is only available for certain platforms,
            // make sure we're only enabling it for that platform.
            if !bcx.dep_platform_activated(dep, unit.kind) {
                return false;
            }

            // If we've gotten past all that, then this dependency is
            // actually used!
            true
        })
    });

    let mut ret = Vec::new();
    for (id, _) in deps {
        let pkg = match state.get(id)? {
            Some(pkg) => pkg,
            None => continue,
        };
        let lib = match pkg.targets().iter().find(|t| t.is_lib()) {
            Some(t) => t,
            None => continue,
        };
        let mode = check_or_build_mode(unit.mode, lib);
        let dep_unit_for = unit_for.with_for_host(lib.for_host());

        if bcx.config.cli_unstable().dual_proc_macros
            && lib.proc_macro()
            && unit.kind == Kind::Target
        {
            let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Target, mode);
            ret.push((unit, dep_unit_for));
            let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Host, mode);
            ret.push((unit, dep_unit_for));
        } else {
            let unit = new_unit(bcx, pkg, lib, dep_unit_for, unit.kind.for_target(lib), mode);
            ret.push((unit, dep_unit_for));
        }
    }

    // If this target is a build script, then what we've collected so far is
    // all we need. If this isn't a build script, then it depends on the
    // build script if there is one.
    if unit.target.is_custom_build() {
        return Ok(ret);
    }
    ret.extend(dep_build_script(unit, bcx));

    // If this target is a binary, test, example, etc., then it depends on
    // the library of the same package. The call to `resolve.deps` above
    // didn't include `pkg` in the return values, so we need to special case
    // it here and see if we need to push `(pkg, pkg_lib_target)`.
    if unit.target.is_lib() && unit.mode != CompileMode::Doctest {
        return Ok(ret);
    }
    ret.extend(maybe_lib(unit, bcx, unit_for));

    // If any integration tests/benches are being run, make sure that
    // binaries are built as well.
    if !unit.mode.is_check()
        && unit.mode.is_any_test()
        && (unit.target.is_test() || unit.target.is_bench())
    {
        ret.extend(
            unit.pkg
                .targets()
                .iter()
                .filter(|t| {
                    let no_required_features = Vec::new();

                    t.is_bin() &&
                        // Skip binaries with required features that have not been selected.
                        t.required_features().unwrap_or(&no_required_features).iter().all(|f| {
                            unit.features.contains(&f.as_str())
                        })
                })
                .map(|t| {
                    (
                        new_unit(
                            bcx,
                            unit.pkg,
                            t,
                            UnitFor::new_normal(),
                            unit.kind.for_target(t),
                            CompileMode::Build,
                        ),
                        UnitFor::new_normal(),
                    )
                }),
        );
    }

    Ok(ret)
}

/// Returns the dependencies needed to run a build script.
///
/// The `unit` provided must represent an execution of a build script, and
/// the returned set of units must all be run before `unit` is run.
fn compute_deps_custom_build<'a, 'cfg>(
    unit: &Unit<'a>,
    bcx: &BuildContext<'a, 'cfg>,
) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
    if let Some(links) = unit.pkg.manifest().links() {
        if bcx.script_override(links, unit.kind).is_some() {
            // Overridden build scripts don't have any dependencies.
            return Ok(Vec::new());
        }
    }

    // When not overridden, the dependencies to run a build script are:
    //
    // 1. Compiling the build script itself.
    // 2. For each immediate dependency of our package which has a `links`
    //    key, the execution of that build script.
    //
    // We don't have a great way of handling (2) here right now so this is
    // deferred until after the graph of all unit dependencies has been
    // constructed.
    let unit = new_unit(
        bcx,
        unit.pkg,
        unit.target,
        UnitFor::new_build(),
        // Build scripts are always compiled for the host.
        Kind::Host,
        CompileMode::Build,
    );
    // All dependencies of this unit should use profiles for custom
    // builds.
    Ok(vec![(unit, UnitFor::new_build())])
}

/// Returns the dependencies necessary to document a package.
fn compute_deps_doc<'a, 'cfg>(
    unit: &Unit<'a>,
    state: &mut State<'a, 'cfg>,
) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
    let bcx = state.bcx;
    let deps = bcx
        .resolve
        .deps(unit.pkg.package_id())
        .filter(|&(_id, deps)| {
            deps.iter().any(|dep| match dep.kind() {
                DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind),
                _ => false,
            })
        });

    // To document a library, we depend on dependencies actually being
    // built. If we're documenting *all* libraries, then we also depend on
    // the documentation of the library being built.
    let mut ret = Vec::new();
    for (id, _deps) in deps {
        let dep = match state.get(id)? {
            Some(dep) => dep,
            None => continue,
        };
        let lib = match dep.targets().iter().find(|t| t.is_lib()) {
            Some(lib) => lib,
            None => continue,
        };
        // Rustdoc only needs rmeta files for regular dependencies.
        // However, for plugins/proc macros, deps should be built like normal.
        let mode = check_or_build_mode(unit.mode, lib);
        let dep_unit_for = UnitFor::new_normal().with_for_host(lib.for_host());
        let lib_unit = new_unit(bcx, dep, lib, dep_unit_for, unit.kind.for_target(lib), mode);
        ret.push((lib_unit, dep_unit_for));
        if let CompileMode::Doc { deps: true } = unit.mode {
            // Document this lib as well.
            let doc_unit = new_unit(
                bcx,
                dep,
                lib,
                dep_unit_for,
                unit.kind.for_target(lib),
                unit.mode,
            );
            ret.push((doc_unit, dep_unit_for));
        }
    }

    // Be sure to build/run the build script for documented libraries.
    ret.extend(dep_build_script(unit, bcx));

    // If we document a binary/example, we need the library available.
    if unit.target.is_bin() || unit.target.is_example() {
        ret.extend(maybe_lib(unit, bcx, UnitFor::new_normal()));
    }
    Ok(ret)
}

/// If the package of `unit` has a linkable library target, returns the unit
/// (and `UnitFor`) needed to build that library, so that binaries, tests,
/// examples, and doc builds of the same package can depend on it.
fn maybe_lib<'a>(
    unit: &Unit<'a>,
    bcx: &BuildContext<'a, '_>,
    unit_for: UnitFor,
) -> Option<(Unit<'a>, UnitFor)> {
    unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| {
        let mode = check_or_build_mode(unit.mode, t);
        let unit = new_unit(bcx, unit.pkg, t, unit_for, unit.kind.for_target(t), mode);
        (unit, unit_for)
    })
}

/// If a build script is scheduled to be run for the package specified by
/// `unit`, this function will return the unit to run that build script.
///
/// Overriding a build script simply means that the running of the build
/// script itself doesn't have any dependencies, so even in that case a unit
/// of work is still returned. `None` is only returned if the package has no
/// build script.
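///
/// Sketch of the resulting edges for a hypothetical package `foo` with a
/// `build.rs` (the returned unit is the *execution* of the script, which in
/// turn depends on compiling it; see `compute_deps_custom_build`):
///
/// ```text
/// foo <target> -> foo build.rs (RunCustomBuild) -> foo build.rs (Build)
/// ```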
fn dep_build_script<'a>(
    unit: &Unit<'a>,
    bcx: &BuildContext<'a, '_>,
) -> Option<(Unit<'a>, UnitFor)> {
    unit.pkg
        .targets()
        .iter()
        .find(|t| t.is_custom_build())
        .map(|t| {
            // The profile stored in the Unit is the profile for the thing
            // the custom build script is running for.
            let unit = bcx.units.intern(
                unit.pkg,
                t,
                bcx.profiles.get_profile_run_custom_build(&unit.profile),
                unit.kind,
                CompileMode::RunCustomBuild,
                bcx.resolve.features_sorted(unit.pkg.package_id()),
            );

            (unit, UnitFor::new_build())
        })
}

/// Choose the correct mode for dependencies.
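///
/// As a rough summary of the match below (illustrative only):
///
/// ```text
/// Check { .. } or Doc { .. }, target is for the host (plugin/proc macro)  => Build
/// Check { .. } or Doc { .. }, regular target                              => Check { test: false }
/// any other mode                                                          => Build
/// ```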
fn check_or_build_mode(mode: CompileMode, target: &Target) -> CompileMode {
    match mode {
        CompileMode::Check { .. } | CompileMode::Doc { .. } => {
            if target.for_host() {
                // Plugin and proc macro targets should be compiled like
                // normal.
                CompileMode::Build
            } else {
                // Regular dependencies should not be checked with --test.
                // Regular dependencies of doc targets should emit rmeta only.
                CompileMode::Check { test: false }
            }
        }
        _ => CompileMode::Build,
    }
}

fn new_unit<'a>(
    bcx: &BuildContext<'a, '_>,
    pkg: &'a Package,
    target: &'a Target,
    unit_for: UnitFor,
    kind: Kind,
    mode: CompileMode,
) -> Unit<'a> {
    let profile = bcx.profiles.get_profile(
        pkg.package_id(),
        bcx.ws.is_member(pkg),
        unit_for,
        mode,
        bcx.build_config.release,
    );

    let features = bcx.resolve.features_sorted(pkg.package_id());
    bcx.units.intern(pkg, target, profile, kind, mode, features)
}

/// Fills in missing dependencies for units of the `RunCustomBuild` mode.
///
/// As mentioned above in `compute_deps_custom_build`, each build script
/// execution has two dependencies. The first is compiling the build script
/// itself (already added), and the second is the execution of the build script
/// of every crate with a `links` key that the build script's package depends
/// on (a bit confusing, eh?).
///
/// Here we take the entire `deps` map and add more dependencies from execution
/// of one build script to execution of another build script.
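///
/// For example (hypothetical packages): if `foo` depends on `bar`, and `bar`'s
/// manifest declares `links = "bar"`, then the extra edge added here is
/// roughly:
///
/// ```text
/// foo build.rs (RunCustomBuild) -> bar build.rs (RunCustomBuild)
/// ```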
fn connect_run_custom_build_deps(state: &mut State<'_, '_>) {
    let mut new_deps = Vec::new();

    {
        // First up, build a reverse dependency map. This is a mapping of all
        // known `RunCustomBuild` steps to the units which depend on them. For
        // example, a library might depend on a build script, so this map will
        // have the build script as the key and the library would be in the
        // value's set.
        let mut reverse_deps = HashMap::new();
        for (unit, deps) in state.unit_dependencies.iter() {
            for dep in deps {
                if dep.mode == CompileMode::RunCustomBuild {
                    reverse_deps
                        .entry(dep)
                        .or_insert_with(HashSet::new)
                        .insert(unit);
                }
            }
        }

        // Next, we take a look at all build script executions listed in the
        // dependency map. Our job here is to take everything that depends on
        // this build script (from our reverse map above) and look at the other
        // package dependencies of these parents.
        //
        // If we depend on a linkable target and the build script mentions
        // `links`, then we depend on that package's build script! Here we use
        // `dep_build_script` to manufacture an appropriate build script unit to
        // depend on.
        for unit in state
            .unit_dependencies
            .keys()
            .filter(|k| k.mode == CompileMode::RunCustomBuild)
        {
            let reverse_deps = match reverse_deps.get(unit) {
                Some(set) => set,
                None => continue,
            };

            let to_add = reverse_deps
                .iter()
                .flat_map(|reverse_dep| state.unit_dependencies[reverse_dep].iter())
                .filter(|other| {
                    other.pkg != unit.pkg
                        && other.target.linkable()
                        && other.pkg.manifest().links().is_some()
                })
                .filter_map(|other| dep_build_script(other, state.bcx).map(|p| p.0))
                .collect::<HashSet<_>>();

            if !to_add.is_empty() {
                new_deps.push((*unit, to_add));
            }
        }
    }

    // And finally, add in all the missing dependencies!
    for (unit, new_deps) in new_deps {
        state
            .unit_dependencies
            .get_mut(&unit)
            .unwrap()
            .extend(new_deps);
    }
}

impl<'a, 'cfg> State<'a, 'cfg> {
    fn get(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> {
        if let Some(pkg) = self.package_cache.get(&id) {
            return Ok(Some(pkg));
        }
        // A failed insert means the download was already requested and is
        // still in flight.
        if !self.waiting_on_download.insert(id) {
            return Ok(None);
        }
        if let Some(pkg) = self.downloads.start(id)? {
            // The package was already available locally; cache it.
            self.package_cache.insert(id, pkg);
            self.waiting_on_download.remove(&id);
            return Ok(Some(pkg));
        }
        Ok(None)
    }

    /// Completes at least one download, maybe waiting for more to complete.
    ///
    /// This function will block the current thread waiting for at least one
    /// crate to finish downloading. The function may continue to download more
    /// crates if it looks like there's a long enough queue of crates to keep
    /// downloading. When only a handful of packages remain, this function
    /// returns, and it's hoped that by returning we'll be able to push more
    /// packages to download into the queue.
    fn finish_some_downloads(&mut self) -> CargoResult<()> {
        assert!(self.downloads.remaining() > 0);
        loop {
            let pkg = self.downloads.wait()?;
            self.waiting_on_download.remove(&pkg.package_id());
            self.package_cache.insert(pkg.package_id(), pkg);

            // Arbitrarily choose that 5 or more packages downloading
            // concurrently is a good enough number to "fill the network pipe".
            // If we have fewer than this, let's recompute the whole unit
            // dependency graph again and try to find some more packages to
            // download.
            if self.downloads.remaining() < 5 {
                break;
            }
        }
        Ok(())
    }
}