1 //! Resolution of the entire dependency graph for a crate.
2 //!
3 //! This module implements the core logic in taking the world of crates and
4 //! constraints and creating a resolved graph with locked versions for all
5 //! crates and their dependencies. This is separate from the registry module
6 //! which is more worried about discovering crates from various sources, this
7 //! module just uses the Registry trait as a source to learn about crates from.
8 //!
9 //! Actually solving a constraint graph is an NP-hard problem. This algorithm
10 //! is basically a nice heuristic to make sure we get roughly the best answer
11 //! most of the time. The constraints that we're working with are:
12 //!
13 //! 1. Each crate can have any number of dependencies. Each dependency can
14 //! declare a version range that it is compatible with.
//! 2. Crates can be activated with multiple versions (e.g., show up in the
//! dependency graph twice) so long as each pairwise instance has
//! semver-incompatible versions.
18 //!
19 //! The algorithm employed here is fairly simple, we simply do a DFS, activating
20 //! the "newest crate" (highest version) first and then going to the next
21 //! option. The heuristics we employ are:
22 //!
23 //! * Never try to activate a crate version which is incompatible. This means we
24 //! only try crates which will actually satisfy a dependency and we won't ever
25 //! try to activate a crate that's semver compatible with something else
26 //! activated (as we're only allowed to have one) nor try to activate a crate
27 //! that has the same links attribute as something else
28 //! activated.
29 //! * Always try to activate the highest version crate first. The default
30 //! dependency in Cargo (e.g., when you write `foo = "0.1.2"`) is
31 //! semver-compatible, so selecting the highest version possible will allow us
32 //! to hopefully satisfy as many dependencies at once.
33 //!
34 //! Beyond that, what's implemented below is just a naive backtracking version
35 //! which should in theory try all possible combinations of dependencies and
36 //! versions to see if one works. The first resolution that works causes
37 //! everything to bail out immediately and return success, and only if *nothing*
38 //! works do we actually return an error up the stack.
39 //!
40 //! ## Performance
41 //!
42 //! Note that this is a relatively performance-critical portion of Cargo. The
43 //! data that we're processing is proportional to the size of the dependency
44 //! graph, which can often be quite large (e.g., take a look at Servo). To make
//! matters worse the DFS algorithm we've implemented is inherently quite
//! inefficient. When we add the requirement of backtracking on top it means
47 //! that we're implementing something that probably shouldn't be allocating all
48 //! over the place.
49
50 use std::collections::{BTreeMap, HashMap, HashSet};
51 use std::mem;
52 use std::rc::Rc;
53 use std::time::{Duration, Instant};
54
55 use log::{debug, trace};
56
57 use crate::core::PackageIdSpec;
58 use crate::core::{Dependency, PackageId, Registry, Summary};
59 use crate::util::config::Config;
60 use crate::util::errors::CargoResult;
61 use crate::util::profile;
62
63 use self::context::Context;
64 use self::dep_cache::RegistryQueryer;
65 use self::features::RequestedFeatures;
66 use self::types::{ConflictMap, ConflictReason, DepsFrame};
67 use self::types::{FeaturesSet, RcVecIter, RemainingDeps, ResolverProgress};
68
69 pub use self::encode::Metadata;
70 pub use self::encode::{EncodableDependency, EncodablePackageId, EncodableResolve};
71 pub use self::errors::{ActivateError, ActivateResult, ResolveError};
72 pub use self::features::{CliFeatures, ForceAllTargets, HasDevUnits};
73 pub use self::resolve::{Resolve, ResolveVersion};
74 pub use self::types::{ResolveBehavior, ResolveOpts};
75 pub use self::version_prefs::{VersionOrdering, VersionPreferences};
76
77 mod conflict_cache;
78 mod context;
79 mod dep_cache;
80 mod encode;
81 pub(crate) mod errors;
82 pub mod features;
83 mod resolve;
84 mod types;
85 mod version_prefs;
86
87 /// Builds the list of all packages required to build the first argument.
88 ///
89 /// * `summaries` - the list of package summaries along with how to resolve
90 /// their features. This is a list of all top-level packages that are intended
91 /// to be part of the lock file (resolve output). These typically are a list
92 /// of all workspace members.
93 ///
94 /// * `replacements` - this is a list of `[replace]` directives found in the
95 /// root of the workspace. The list here is a `PackageIdSpec` of what to
96 /// replace and a `Dependency` to replace that with. In general it's not
97 /// recommended to use `[replace]` any more and use `[patch]` instead, which
98 /// is supported elsewhere.
99 ///
100 /// * `registry` - this is the source from which all package summaries are
101 /// loaded. It's expected that this is extensively configured ahead of time
102 /// and is idempotent with our requests to it (aka returns the same results
103 /// for the same query every time). Typically this is an instance of a
104 /// `PackageRegistry`.
105 ///
106 /// * `version_prefs` - this represents a preference for some versions over others,
107 /// based on the lock file or other reasons such as `[patch]`es.
108 ///
109 /// * `config` - a location to print warnings and such, or `None` if no warnings
110 /// should be printed
111 ///
112 /// * `check_public_visible_dependencies` - a flag for whether to enforce the restrictions
113 /// introduced in the "public & private dependencies" RFC (1977). The current implementation
114 /// makes sure that there is only one version of each name visible to each package.
115 ///
116 /// But there are 2 stable ways to directly depend on different versions of the same name.
117 /// 1. Use the renamed dependencies functionality
118 /// 2. Use 'cfg({})' dependencies functionality
119 ///
120 /// When we have a decision for how to implement is without breaking existing functionality
121 /// this flag can be removed.
resolve( summaries: &[(Summary, ResolveOpts)], replacements: &[(PackageIdSpec, Dependency)], registry: &mut dyn Registry, version_prefs: &VersionPreferences, config: Option<&Config>, check_public_visible_dependencies: bool, ) -> CargoResult<Resolve>122 pub fn resolve(
123 summaries: &[(Summary, ResolveOpts)],
124 replacements: &[(PackageIdSpec, Dependency)],
125 registry: &mut dyn Registry,
126 version_prefs: &VersionPreferences,
127 config: Option<&Config>,
128 check_public_visible_dependencies: bool,
129 ) -> CargoResult<Resolve> {
130 let cx = Context::new(check_public_visible_dependencies);
131 let _p = profile::start("resolving");
132 let minimal_versions = match config {
133 Some(config) => config.cli_unstable().minimal_versions,
134 None => false,
135 };
136 let mut registry =
137 RegistryQueryer::new(registry, replacements, version_prefs, minimal_versions);
138 let cx = activate_deps_loop(cx, &mut registry, summaries, config)?;
139
140 let mut cksums = HashMap::new();
141 for (summary, _) in cx.activations.values() {
142 let cksum = summary.checksum().map(|s| s.to_string());
143 cksums.insert(summary.package_id(), cksum);
144 }
145 let graph = cx.graph();
146 let replacements = cx.resolve_replacements(®istry);
147 let features = cx
148 .resolve_features
149 .iter()
150 .map(|(k, v)| (*k, v.iter().cloned().collect()))
151 .collect();
152 let summaries = cx
153 .activations
154 .into_iter()
155 .map(|(_key, (summary, _age))| (summary.package_id(), summary))
156 .collect();
157 let resolve = Resolve::new(
158 graph,
159 replacements,
160 features,
161 cksums,
162 BTreeMap::new(),
163 Vec::new(),
164 ResolveVersion::default(),
165 summaries,
166 );
167
168 check_cycles(&resolve)?;
169 check_duplicate_pkgs_in_lockfile(&resolve)?;
170 trace!("resolved: {:?}", resolve);
171
172 Ok(resolve)
173 }
174
175 /// Recursively activates the dependencies for `summaries`, in depth-first order,
176 /// backtracking across possible candidates for each dependency as necessary.
177 ///
178 /// If all dependencies can be activated and resolved to a version in the
179 /// dependency graph, `cx` is returned.
activate_deps_loop( mut cx: Context, registry: &mut RegistryQueryer<'_>, summaries: &[(Summary, ResolveOpts)], config: Option<&Config>, ) -> CargoResult<Context>180 fn activate_deps_loop(
181 mut cx: Context,
182 registry: &mut RegistryQueryer<'_>,
183 summaries: &[(Summary, ResolveOpts)],
184 config: Option<&Config>,
185 ) -> CargoResult<Context> {
186 let mut backtrack_stack = Vec::new();
187 let mut remaining_deps = RemainingDeps::new();
188
189 // `past_conflicting_activations` is a cache of the reasons for each time we
190 // backtrack.
191 let mut past_conflicting_activations = conflict_cache::ConflictCache::new();
192
193 // Activate all the initial summaries to kick off some work.
194 for &(ref summary, ref opts) in summaries {
195 debug!("initial activation: {}", summary.package_id());
196 let res = activate(&mut cx, registry, None, summary.clone(), opts);
197 match res {
198 Ok(Some((frame, _))) => remaining_deps.push(frame),
199 Ok(None) => (),
200 Err(ActivateError::Fatal(e)) => return Err(e),
201 Err(ActivateError::Conflict(_, _)) => panic!("bad error from activate"),
202 }
203 }
204
205 let mut printed = ResolverProgress::new();
206
207 // Main resolution loop, this is the workhorse of the resolution algorithm.
208 //
209 // You'll note that a few stacks are maintained on the side, which might
210 // seem odd when this algorithm looks like it could be implemented
211 // recursively. While correct, this is implemented iteratively to avoid
212 // blowing the stack (the recursion depth is proportional to the size of the
213 // input).
214 //
215 // The general sketch of this loop is to run until there are no dependencies
216 // left to activate, and for each dependency to attempt to activate all of
217 // its own dependencies in turn. The `backtrack_stack` is a side table of
218 // backtracking states where if we hit an error we can return to in order to
219 // attempt to continue resolving.
220 while let Some((just_here_for_the_error_messages, frame)) =
221 remaining_deps.pop_most_constrained()
222 {
223 let (mut parent, (mut dep, candidates, mut features)) = frame;
224
225 // If we spend a lot of time here (we shouldn't in most cases) then give
226 // a bit of a visual indicator as to what we're doing.
227 printed.shell_status(config)?;
228
229 trace!(
230 "{}[{}]>{} {} candidates",
231 parent.name(),
232 cx.age,
233 dep.package_name(),
234 candidates.len()
235 );
236
237 let just_here_for_the_error_messages = just_here_for_the_error_messages
238 && past_conflicting_activations
239 .conflicting(&cx, &dep)
240 .is_some();
241
242 let mut remaining_candidates = RemainingCandidates::new(&candidates);
243
244 // `conflicting_activations` stores all the reasons we were unable to
245 // activate candidates. One of these reasons will have to go away for
246 // backtracking to find a place to restart. It is also the list of
247 // things to explain in the error message if we fail to resolve.
248 //
249 // This is a map of package ID to a reason why that packaged caused a
250 // conflict for us.
251 let mut conflicting_activations = ConflictMap::new();
252
253 // When backtracking we don't fully update `conflicting_activations`
254 // especially for the cases that we didn't make a backtrack frame in the
255 // first place. This `backtracked` var stores whether we are continuing
256 // from a restored backtrack frame so that we can skip caching
257 // `conflicting_activations` in `past_conflicting_activations`
258 let mut backtracked = false;
259
260 loop {
261 let next = remaining_candidates.next(
262 &mut conflicting_activations,
263 &cx,
264 &dep,
265 parent.package_id(),
266 );
267
268 let (candidate, has_another) = next.ok_or(()).or_else(|_| {
269 // If we get here then our `remaining_candidates` was just
270 // exhausted, so `dep` failed to activate.
271 //
272 // It's our job here to backtrack, if possible, and find a
273 // different candidate to activate. If we can't find any
274 // candidates whatsoever then it's time to bail entirely.
275 trace!(
276 "{}[{}]>{} -- no candidates",
277 parent.name(),
278 cx.age,
279 dep.package_name()
280 );
281
282 // Use our list of `conflicting_activations` to add to our
283 // global list of past conflicting activations, effectively
284 // globally poisoning `dep` if `conflicting_activations` ever
285 // shows up again. We'll use the `past_conflicting_activations`
286 // below to determine if a dependency is poisoned and skip as
287 // much work as possible.
288 //
289 // If we're only here for the error messages then there's no
290 // need to try this as this dependency is already known to be
291 // bad.
292 //
293 // As we mentioned above with the `backtracked` variable if this
294 // local is set to `true` then our `conflicting_activations` may
295 // not be right, so we can't push into our global cache.
296 let mut generalize_conflicting_activations = None;
297 if !just_here_for_the_error_messages && !backtracked {
298 past_conflicting_activations.insert(&dep, &conflicting_activations);
299 if let Some(c) = generalize_conflicting(
300 &cx,
301 registry,
302 &mut past_conflicting_activations,
303 &parent,
304 &dep,
305 &conflicting_activations,
306 ) {
307 generalize_conflicting_activations = Some(c);
308 }
309 }
310
311 match find_candidate(
312 &cx,
313 &mut backtrack_stack,
314 &parent,
315 backtracked,
316 generalize_conflicting_activations
317 .as_ref()
318 .unwrap_or(&conflicting_activations),
319 ) {
320 Some((candidate, has_another, frame)) => {
321 // Reset all of our local variables used with the
322 // contents of `frame` to complete our backtrack.
323 cx = frame.context;
324 remaining_deps = frame.remaining_deps;
325 remaining_candidates = frame.remaining_candidates;
326 parent = frame.parent;
327 dep = frame.dep;
328 features = frame.features;
329 conflicting_activations = frame.conflicting_activations;
330 backtracked = true;
331 Ok((candidate, has_another))
332 }
333 None => {
334 debug!("no candidates found");
335 Err(errors::activation_error(
336 &cx,
337 registry.registry,
338 &parent,
339 &dep,
340 &conflicting_activations,
341 &candidates,
342 config,
343 ))
344 }
345 }
346 })?;
347
348 // If we're only here for the error messages then we know that this
349 // activation will fail one way or another. To that end if we've got
350 // more candidates we want to fast-forward to the last one as
351 // otherwise we'll just backtrack here anyway (helping us to skip
352 // some work).
353 if just_here_for_the_error_messages && !backtracked && has_another {
354 continue;
355 }
356
357 // We have a `candidate`. Create a `BacktrackFrame` so we can add it
358 // to the `backtrack_stack` later if activation succeeds.
359 //
360 // Note that if we don't actually have another candidate then there
361 // will be nothing to backtrack to so we skip construction of the
362 // frame. This is a relatively important optimization as a number of
363 // the `clone` calls below can be quite expensive, so we avoid them
364 // if we can.
365 let backtrack = if has_another {
366 Some(BacktrackFrame {
367 context: Context::clone(&cx),
368 remaining_deps: remaining_deps.clone(),
369 remaining_candidates: remaining_candidates.clone(),
370 parent: Summary::clone(&parent),
371 dep: Dependency::clone(&dep),
372 features: Rc::clone(&features),
373 conflicting_activations: conflicting_activations.clone(),
374 })
375 } else {
376 None
377 };
378
379 let pid = candidate.package_id();
380 let opts = ResolveOpts {
381 dev_deps: false,
382 features: RequestedFeatures::DepFeatures {
383 features: Rc::clone(&features),
384 uses_default_features: dep.uses_default_features(),
385 },
386 };
387 trace!(
388 "{}[{}]>{} trying {}",
389 parent.name(),
390 cx.age,
391 dep.package_name(),
392 candidate.version()
393 );
394 let res = activate(&mut cx, registry, Some((&parent, &dep)), candidate, &opts);
395
396 let successfully_activated = match res {
397 // Success! We've now activated our `candidate` in our context
398 // and we're almost ready to move on. We may want to scrap this
399 // frame in the end if it looks like it's not going to end well,
400 // so figure that out here.
401 Ok(Some((mut frame, dur))) => {
402 printed.elapsed(dur);
403
404 // Our `frame` here is a new package with its own list of
405 // dependencies. Do a sanity check here of all those
406 // dependencies by cross-referencing our global
407 // `past_conflicting_activations`. Recall that map is a
408 // global cache which lists sets of packages where, when
409 // activated, the dependency is unresolvable.
410 //
411 // If any our our frame's dependencies fit in that bucket,
412 // aka known unresolvable, then we extend our own set of
413 // conflicting activations with theirs. We can do this
414 // because the set of conflicts we found implies the
415 // dependency can't be activated which implies that we
416 // ourselves can't be activated, so we know that they
417 // conflict with us.
418 let mut has_past_conflicting_dep = just_here_for_the_error_messages;
419 if !has_past_conflicting_dep {
420 if let Some(conflicting) = frame
421 .remaining_siblings
422 .clone()
423 .filter_map(|(ref new_dep, _, _)| {
424 past_conflicting_activations.conflicting(&cx, new_dep)
425 })
426 .next()
427 {
428 // If one of our deps is known unresolvable
429 // then we will not succeed.
430 // How ever if we are part of the reason that
431 // one of our deps conflicts then
432 // we can make a stronger statement
433 // because we will definitely be activated when
434 // we try our dep.
435 conflicting_activations.extend(
436 conflicting
437 .iter()
438 .filter(|&(p, _)| p != &pid)
439 .map(|(&p, r)| (p, r.clone())),
440 );
441
442 has_past_conflicting_dep = true;
443 }
444 }
445 // If any of `remaining_deps` are known unresolvable with
446 // us activated, then we extend our own set of
447 // conflicting activations with theirs and its parent. We can do this
448 // because the set of conflicts we found implies the
449 // dependency can't be activated which implies that we
450 // ourselves are incompatible with that dep, so we know that deps
451 // parent conflict with us.
452 if !has_past_conflicting_dep {
453 if let Some(known_related_bad_deps) =
454 past_conflicting_activations.dependencies_conflicting_with(pid)
455 {
456 if let Some((other_parent, conflict)) = remaining_deps
457 .iter()
458 // for deps related to us
459 .filter(|&(_, ref other_dep)| {
460 known_related_bad_deps.contains(other_dep)
461 })
462 .filter_map(|(other_parent, other_dep)| {
463 past_conflicting_activations
464 .find_conflicting(&cx, &other_dep, Some(pid))
465 .map(|con| (other_parent, con))
466 })
467 .next()
468 {
469 let rel = conflict.get(&pid).unwrap().clone();
470
471 // The conflict we found is
472 // "other dep will not succeed if we are activated."
473 // We want to add
474 // "our dep will not succeed if other dep is in remaining_deps"
475 // but that is not how the cache is set up.
476 // So we add the less general but much faster,
477 // "our dep will not succeed if other dep's parent is activated".
478 conflicting_activations.extend(
479 conflict
480 .iter()
481 .filter(|&(p, _)| p != &pid)
482 .map(|(&p, r)| (p, r.clone())),
483 );
484 conflicting_activations.insert(other_parent, rel);
485 has_past_conflicting_dep = true;
486 }
487 }
488 }
489
490 // Ok if we're in a "known failure" state for this frame we
491 // may want to skip it altogether though. We don't want to
492 // skip it though in the case that we're displaying error
493 // messages to the user!
494 //
495 // Here we need to figure out if the user will see if we
496 // skipped this candidate (if it's known to fail, aka has a
497 // conflicting dep and we're the last candidate). If we're
498 // here for the error messages, we can't skip it (but we can
499 // prune extra work). If we don't have any candidates in our
500 // backtrack stack then we're the last line of defense, so
501 // we'll want to present an error message for sure.
502 let activate_for_error_message = has_past_conflicting_dep && !has_another && {
503 just_here_for_the_error_messages || {
504 find_candidate(
505 &cx,
506 &mut backtrack_stack.clone(),
507 &parent,
508 backtracked,
509 &conflicting_activations,
510 )
511 .is_none()
512 }
513 };
514
515 // If we're only here for the error messages then we know
516 // one of our candidate deps will fail, meaning we will
517 // fail and that none of the backtrack frames will find a
518 // candidate that will help. Consequently let's clean up the
519 // no longer needed backtrack frames.
520 if activate_for_error_message {
521 backtrack_stack.clear();
522 }
523
524 // If we don't know for a fact that we'll fail or if we're
525 // just here for the error message then we push this frame
526 // onto our list of to-be-resolve, which will generate more
527 // work for us later on.
528 //
529 // Otherwise we're guaranteed to fail and were not here for
530 // error messages, so we skip work and don't push anything
531 // onto our stack.
532 frame.just_for_error_messages = has_past_conflicting_dep;
533 if !has_past_conflicting_dep || activate_for_error_message {
534 remaining_deps.push(frame);
535 true
536 } else {
537 trace!(
538 "{}[{}]>{} skipping {} ",
539 parent.name(),
540 cx.age,
541 dep.package_name(),
542 pid.version()
543 );
544 false
545 }
546 }
547
548 // This candidate's already activated, so there's no extra work
549 // for us to do. Let's keep going.
550 Ok(None) => true,
551
552 // We failed with a super fatal error (like a network error), so
553 // bail out as quickly as possible as we can't reliably
554 // backtrack from errors like these
555 Err(ActivateError::Fatal(e)) => return Err(e),
556
557 // We failed due to a bland conflict, bah! Record this in our
558 // frame's list of conflicting activations as to why this
559 // candidate failed, and then move on.
560 Err(ActivateError::Conflict(id, reason)) => {
561 conflicting_activations.insert(id, reason);
562 false
563 }
564 };
565
566 // If we've successfully activated then save off the backtrack frame
567 // if one was created, and otherwise break out of the inner
568 // activation loop as we're ready to move to the next dependency
569 if successfully_activated {
570 backtrack_stack.extend(backtrack);
571 break;
572 }
573
574 // We've failed to activate this dependency, oh dear! Our call to
575 // `activate` above may have altered our `cx` local variable, so
576 // restore it back if we've got a backtrack frame.
577 //
578 // If we don't have a backtrack frame then we're just using the `cx`
579 // for error messages anyway so we can live with a little
580 // imprecision.
581 if let Some(b) = backtrack {
582 cx = b.context;
583 }
584 }
585
586 // Ok phew, that loop was a big one! If we've broken out then we've
587 // successfully activated a candidate. Our stacks are all in place that
588 // we're ready to move on to the next dependency that needs activation,
589 // so loop back to the top of the function here.
590 }
591
592 Ok(cx)
593 }
594
595 /// Attempts to activate the summary `candidate` in the context `cx`.
596 ///
597 /// This function will pull dependency summaries from the registry provided, and
598 /// the dependencies of the package will be determined by the `opts` provided.
599 /// If `candidate` was activated, this function returns the dependency frame to
600 /// iterate through next.
activate( cx: &mut Context, registry: &mut RegistryQueryer<'_>, parent: Option<(&Summary, &Dependency)>, candidate: Summary, opts: &ResolveOpts, ) -> ActivateResult<Option<(DepsFrame, Duration)>>601 fn activate(
602 cx: &mut Context,
603 registry: &mut RegistryQueryer<'_>,
604 parent: Option<(&Summary, &Dependency)>,
605 candidate: Summary,
606 opts: &ResolveOpts,
607 ) -> ActivateResult<Option<(DepsFrame, Duration)>> {
608 let candidate_pid = candidate.package_id();
609 cx.age += 1;
610 if let Some((parent, dep)) = parent {
611 let parent_pid = parent.package_id();
612 // add an edge from candidate to parent in the parents graph
613 cx.parents
614 .link(candidate_pid, parent_pid)
615 // and associate dep with that edge
616 .insert(dep.clone());
617 if let Some(public_dependency) = cx.public_dependency.as_mut() {
618 public_dependency.add_edge(
619 candidate_pid,
620 parent_pid,
621 dep.is_public(),
622 cx.age,
623 &cx.parents,
624 );
625 }
626 }
627
628 let activated = cx.flag_activated(&candidate, opts, parent)?;
629
630 let candidate = match registry.replacement_summary(candidate_pid) {
631 Some(replace) => {
632 // Note the `None` for parent here since `[replace]` is a bit wonky
633 // and doesn't activate the same things that `[patch]` typically
634 // does. TBH it basically cause panics in the test suite if
635 // `parent` is passed through here and `[replace]` is otherwise
636 // on life support so it's not critical to fix bugs anyway per se.
637 if cx.flag_activated(replace, opts, None)? && activated {
638 return Ok(None);
639 }
640 trace!(
641 "activating {} (replacing {})",
642 replace.package_id(),
643 candidate_pid
644 );
645 replace.clone()
646 }
647 None => {
648 if activated {
649 return Ok(None);
650 }
651 trace!("activating {}", candidate_pid);
652 candidate
653 }
654 };
655
656 let now = Instant::now();
657 let (used_features, deps) =
658 &*registry.build_deps(cx, parent.map(|p| p.0.package_id()), &candidate, opts)?;
659
660 // Record what list of features is active for this package.
661 if !used_features.is_empty() {
662 Rc::make_mut(
663 cx.resolve_features
664 .entry(candidate.package_id())
665 .or_insert_with(Rc::default),
666 )
667 .extend(used_features);
668 }
669
670 let frame = DepsFrame {
671 parent: candidate,
672 just_for_error_messages: false,
673 remaining_siblings: RcVecIter::new(Rc::clone(deps)),
674 };
675 Ok(Some((frame, now.elapsed())))
676 }
677
678 #[derive(Clone)]
679 struct BacktrackFrame {
680 context: Context,
681 remaining_deps: RemainingDeps,
682 remaining_candidates: RemainingCandidates,
683 parent: Summary,
684 dep: Dependency,
685 features: FeaturesSet,
686 conflicting_activations: ConflictMap,
687 }
688
689 /// A helper "iterator" used to extract candidates within a current `Context` of
690 /// a dependency graph.
691 ///
692 /// This struct doesn't literally implement the `Iterator` trait (requires a few
693 /// more inputs) but in general acts like one. Each `RemainingCandidates` is
694 /// created with a list of candidates to choose from. When attempting to iterate
695 /// over the list of candidates only *valid* candidates are returned. Validity
696 /// is defined within a `Context`.
697 ///
698 /// Candidates passed to `new` may not be returned from `next` as they could be
699 /// filtered out, and as they are filtered the causes will be added to `conflicting_prev_active`.
700 #[derive(Clone)]
701 struct RemainingCandidates {
702 remaining: RcVecIter<Summary>,
703 // This is an inlined peekable generator
704 has_another: Option<Summary>,
705 }
706
707 impl RemainingCandidates {
new(candidates: &Rc<Vec<Summary>>) -> RemainingCandidates708 fn new(candidates: &Rc<Vec<Summary>>) -> RemainingCandidates {
709 RemainingCandidates {
710 remaining: RcVecIter::new(Rc::clone(candidates)),
711 has_another: None,
712 }
713 }
714
715 /// Attempts to find another candidate to check from this list.
716 ///
717 /// This method will attempt to move this iterator forward, returning a
718 /// candidate that's possible to activate. The `cx` argument is the current
719 /// context which determines validity for candidates returned, and the `dep`
720 /// is the dependency listing that we're activating for.
721 ///
722 /// If successful a `(Candidate, bool)` pair will be returned. The
723 /// `Candidate` is the candidate to attempt to activate, and the `bool` is
724 /// an indicator of whether there are remaining candidates to try of if
725 /// we've reached the end of iteration.
726 ///
727 /// If we've reached the end of the iterator here then `Err` will be
728 /// returned. The error will contain a map of package ID to conflict reason,
729 /// where each package ID caused a candidate to be filtered out from the
730 /// original list for the reason listed.
next( &mut self, conflicting_prev_active: &mut ConflictMap, cx: &Context, dep: &Dependency, parent: PackageId, ) -> Option<(Summary, bool)>731 fn next(
732 &mut self,
733 conflicting_prev_active: &mut ConflictMap,
734 cx: &Context,
735 dep: &Dependency,
736 parent: PackageId,
737 ) -> Option<(Summary, bool)> {
738 for b in self.remaining.by_ref() {
739 let b_id = b.package_id();
740 // The `links` key in the manifest dictates that there's only one
741 // package in a dependency graph, globally, with that particular
742 // `links` key. If this candidate links to something that's already
743 // linked to by a different package then we've gotta skip this.
744 if let Some(link) = b.links() {
745 if let Some(&a) = cx.links.get(&link) {
746 if a != b_id {
747 conflicting_prev_active
748 .entry(a)
749 .or_insert_with(|| ConflictReason::Links(link));
750 continue;
751 }
752 }
753 }
754
755 // Otherwise the condition for being a valid candidate relies on
756 // semver. Cargo dictates that you can't duplicate multiple
757 // semver-compatible versions of a crate. For example we can't
758 // simultaneously activate `foo 1.0.2` and `foo 1.2.0`. We can,
759 // however, activate `1.0.2` and `2.0.0`.
760 //
761 // Here we throw out our candidate if it's *compatible*, yet not
762 // equal, to all previously activated versions.
763 if let Some((a, _)) = cx.activations.get(&b_id.as_activations_key()) {
764 if *a != b {
765 conflicting_prev_active
766 .entry(a.package_id())
767 .or_insert(ConflictReason::Semver);
768 continue;
769 }
770 }
771 // We may still have to reject do to a public dependency conflict. If one of any of our
772 // ancestors that can see us already knows about a different crate with this name then
773 // we have to reject this candidate. Additionally this candidate may already have been
774 // activated and have public dependants of its own,
775 // all of witch also need to be checked the same way.
776 if let Some(public_dependency) = cx.public_dependency.as_ref() {
777 if let Err(((c1, c2), c3)) =
778 public_dependency.can_add_edge(b_id, parent, dep.is_public(), &cx.parents)
779 {
780 conflicting_prev_active.insert(c1.0, c1.1);
781 conflicting_prev_active.insert(c2.0, c2.1);
782 if let Some(c3) = c3 {
783 conflicting_prev_active.insert(c3.0, c3.1);
784 }
785 continue;
786 }
787 }
788
789 // Well if we made it this far then we've got a valid dependency. We
790 // want this iterator to be inherently "peekable" so we don't
791 // necessarily return the item just yet. Instead we stash it away to
792 // get returned later, and if we replaced something then that was
793 // actually the candidate to try first so we return that.
794 if let Some(r) = mem::replace(&mut self.has_another, Some(b)) {
795 return Some((r, true));
796 }
797 }
798
799 // Alright we've entirely exhausted our list of candidates. If we've got
800 // something stashed away return that here (also indicating that there's
801 // nothing else).
802 self.has_another.take().map(|r| (r, false))
803 }
804 }
805
/// Attempts to find a new conflict that allows `find_candidate` to jump further back than the input one would.
/// It will add the new conflict to the cache if one is found.
808 ///
809 /// Panics if the input conflict is not all active in `cx`.
generalize_conflicting( cx: &Context, registry: &mut RegistryQueryer<'_>, past_conflicting_activations: &mut conflict_cache::ConflictCache, parent: &Summary, dep: &Dependency, conflicting_activations: &ConflictMap, ) -> Option<ConflictMap>810 fn generalize_conflicting(
811 cx: &Context,
812 registry: &mut RegistryQueryer<'_>,
813 past_conflicting_activations: &mut conflict_cache::ConflictCache,
814 parent: &Summary,
815 dep: &Dependency,
816 conflicting_activations: &ConflictMap,
817 ) -> Option<ConflictMap> {
818 if conflicting_activations.is_empty() {
819 return None;
820 }
821 // We need to determine the `ContextAge` that this `conflicting_activations` will jump to, and why.
822 let (backtrack_critical_age, backtrack_critical_id) = conflicting_activations
823 .keys()
824 .map(|&c| (cx.is_active(c).expect("not currently active!?"), c))
825 .max()
826 .unwrap();
827 let backtrack_critical_reason: ConflictReason =
828 conflicting_activations[&backtrack_critical_id].clone();
829
830 if backtrack_critical_reason.is_public_dependency() {
831 return None;
832 }
833
834 if cx
835 .parents
836 .is_path_from_to(&parent.package_id(), &backtrack_critical_id)
837 {
838 // We are a descendant of the trigger of the problem.
839 // The best generalization of this is to let things bubble up
840 // and let `backtrack_critical_id` figure this out.
841 return None;
842 }
843 // What parents does that critical activation have
844 for (critical_parent, critical_parents_deps) in
845 cx.parents.edges(&backtrack_critical_id).filter(|(p, _)| {
846 // it will only help backjump further if it is older then the critical_age
847 cx.is_active(**p).expect("parent not currently active!?") < backtrack_critical_age
848 })
849 {
850 for critical_parents_dep in critical_parents_deps.iter() {
851 // A dep is equivalent to one of the things it can resolve to.
852 // Thus, if all the things it can resolve to have already ben determined
853 // to be conflicting, then we can just say that we conflict with the parent.
854 if let Some(others) = registry
855 .query(critical_parents_dep)
856 .expect("an already used dep now error!?")
857 .iter()
858 .rev() // the last one to be tried is the least likely to be in the cache, so start with that.
859 .map(|other| {
860 past_conflicting_activations
861 .find(
862 dep,
863 &|id| {
864 if id == other.package_id() {
865 // we are imagining that we used other instead
866 Some(backtrack_critical_age)
867 } else {
868 cx.is_active(id)
869 }
870 },
871 Some(other.package_id()),
872 // we only care about things that are newer then critical_age
873 backtrack_critical_age,
874 )
875 .map(|con| (other.package_id(), con))
876 })
877 .collect::<Option<Vec<(PackageId, &ConflictMap)>>>()
878 {
879 let mut con = conflicting_activations.clone();
880 // It is always valid to combine previously inserted conflicts.
881 // A, B are both known bad states each that can never be activated.
882 // A + B is redundant but can't be activated, as if
883 // A + B is active then A is active and we know that is not ok.
884 for (_, other) in &others {
885 con.extend(other.iter().map(|(&id, re)| (id, re.clone())));
886 }
887 // Now that we have this combined conflict, we can do a substitution:
888 // A dep is equivalent to one of the things it can resolve to.
889 // So we can remove all the things that it resolves to and replace with the parent.
890 for (other_id, _) in &others {
891 con.remove(other_id);
892 }
893 con.insert(*critical_parent, backtrack_critical_reason);
894
895 if cfg!(debug_assertions) {
896 // the entire point is to find an older conflict, so let's make sure we did
897 let new_age = con
898 .keys()
899 .map(|&c| cx.is_active(c).expect("not currently active!?"))
900 .max()
901 .unwrap();
902 assert!(
903 new_age < backtrack_critical_age,
904 "new_age {} < backtrack_critical_age {}",
905 new_age,
906 backtrack_critical_age
907 );
908 }
909 past_conflicting_activations.insert(dep, &con);
910 return Some(con);
911 }
912 }
913 }
914 None
915 }
916
/// Looks through the states in `backtrack_stack` for dependencies with
/// remaining candidates. For each one, also checks if rolling back
/// could change the outcome of the failed resolution that caused backtracking
/// in the first place. Namely, if we've backtracked past the parent of the
/// failed dep, or any of the packages flagged as giving us trouble in
/// `conflicting_activations`.
///
/// Read <https://github.com/rust-lang/cargo/pull/4834>
/// for several more detailed explanations of the logic here.
fn find_candidate(
    cx: &Context,
    backtrack_stack: &mut Vec<BacktrackFrame>,
    parent: &Summary,
    backtracked: bool,
    conflicting_activations: &ConflictMap,
) -> Option<(Summary, bool, BacktrackFrame)> {
    // When we're calling this method we know that `parent` failed to
    // activate. That means that some dependency failed to get resolved for
    // whatever reason. Normally, that means that all of those reasons
    // (plus maybe some extras) are listed in `conflicting_activations`.
    //
    // The abnormal situations are things that do not put all of the reasons in `conflicting_activations`:
    // If we backtracked we do not know how our `conflicting_activations` related to
    // the cause of that backtrack, so we do not update it.
    let age = if !backtracked {
        // we don't have abnormal situations. So we can ask `cx` for how far back we need to go.
        let a = cx.is_conflicting(Some(parent.package_id()), conflicting_activations);
        // If the `conflicting_activations` does not apply to `cx`, then something went very wrong
        // in building it. But we will just fall back to laboriously trying all possibilities which
        // will give us the correct answer so only `assert` if there is a developer to debug it.
        debug_assert!(a.is_some());
        a
    } else {
        None
    };

    // Pop frames until one of them still has an untried candidate for its
    // dependency; frames with nothing left to try are discarded.
    while let Some(mut frame) = backtrack_stack.pop() {
        let next = frame.remaining_candidates.next(
            &mut frame.conflicting_activations,
            &frame.context,
            &frame.dep,
            frame.parent.package_id(),
        );
        let (candidate, has_another) = match next {
            Some(pair) => pair,
            None => continue,
        };

        // If all members of `conflicting_activations` are still
        // active in this back up we know that we're guaranteed to not actually
        // make any progress. As a result if we hit this condition we can
        // completely skip this backtrack frame and move on to the next.
        if let Some(age) = age {
            if frame.context.age >= age {
                trace!(
                    "{} = \"{}\" skip as not solving {}: {:?}",
                    frame.dep.package_name(),
                    frame.dep.version_req(),
                    parent.package_id(),
                    conflicting_activations
                );
                // above we use `cx` to determine that this is still going to be conflicting.
                // but lets just double check.
                debug_assert!(
                    frame
                        .context
                        .is_conflicting(Some(parent.package_id()), conflicting_activations)
                        == Some(age)
                );
                continue;
            } else {
                // above we use `cx` to determine that this is not going to be conflicting.
                // but lets just double check.
                debug_assert!(frame
                    .context
                    .is_conflicting(Some(parent.package_id()), conflicting_activations)
                    .is_none());
            }
        }

        return Some((candidate, has_another, frame));
    }
    None
}
1001
check_cycles(resolve: &Resolve) -> CargoResult<()>1002 fn check_cycles(resolve: &Resolve) -> CargoResult<()> {
1003 // Create a simple graph representation alternative of `resolve` which has
1004 // only the edges we care about. Note that `BTree*` is used to produce
1005 // deterministic error messages here. Also note that the main reason for
1006 // this copy of the resolve graph is to avoid edges between a crate and its
1007 // dev-dependency since that doesn't count for cycles.
1008 let mut graph = BTreeMap::new();
1009 for id in resolve.iter() {
1010 let map = graph.entry(id).or_insert_with(BTreeMap::new);
1011 for (dep_id, listings) in resolve.deps_not_replaced(id) {
1012 let transitive_dep = listings.iter().find(|d| d.is_transitive());
1013
1014 if let Some(transitive_dep) = transitive_dep.cloned() {
1015 map.insert(dep_id, transitive_dep.clone());
1016 resolve
1017 .replacement(dep_id)
1018 .map(|p| map.insert(p, transitive_dep));
1019 }
1020 }
1021 }
1022
1023 // After we have the `graph` that we care about, perform a simple cycle
1024 // check by visiting all nodes. We visit each node at most once and we keep
1025 // track of the path through the graph as we walk it. If we walk onto the
1026 // same node twice that's a cycle.
1027 let mut checked = HashSet::new();
1028 let mut path = Vec::new();
1029 let mut visited = HashSet::new();
1030 for pkg in graph.keys() {
1031 if !checked.contains(pkg) {
1032 visit(&graph, *pkg, &mut visited, &mut path, &mut checked)?
1033 }
1034 }
1035 return Ok(());
1036
1037 fn visit(
1038 graph: &BTreeMap<PackageId, BTreeMap<PackageId, Dependency>>,
1039 id: PackageId,
1040 visited: &mut HashSet<PackageId>,
1041 path: &mut Vec<PackageId>,
1042 checked: &mut HashSet<PackageId>,
1043 ) -> CargoResult<()> {
1044 path.push(id);
1045 if !visited.insert(id) {
1046 let iter = path.iter().rev().skip(1).scan(id, |child, parent| {
1047 let dep = graph.get(parent).and_then(|adjacent| adjacent.get(child));
1048 *child = *parent;
1049 Some((parent, dep))
1050 });
1051 let iter = std::iter::once((&id, None)).chain(iter);
1052 anyhow::bail!(
1053 "cyclic package dependency: package `{}` depends on itself. Cycle:\n{}",
1054 id,
1055 errors::describe_path(iter),
1056 );
1057 }
1058
1059 if checked.insert(id) {
1060 for dep in graph[&id].keys() {
1061 visit(graph, *dep, visited, path, checked)?;
1062 }
1063 }
1064
1065 path.pop();
1066 visited.remove(&id);
1067 Ok(())
1068 }
1069 }
1070
1071 /// Checks that packages are unique when written to lock file.
1072 ///
1073 /// When writing package ID's to lock file, we apply lossy encoding. In
1074 /// particular, we don't store paths of path dependencies. That means that
1075 /// *different* packages may collide in the lock file, hence this check.
check_duplicate_pkgs_in_lockfile(resolve: &Resolve) -> CargoResult<()>1076 fn check_duplicate_pkgs_in_lockfile(resolve: &Resolve) -> CargoResult<()> {
1077 let mut unique_pkg_ids = HashMap::new();
1078 let state = encode::EncodeState::new(resolve);
1079 for pkg_id in resolve.iter() {
1080 let encodable_pkd_id = encode::encodable_package_id(pkg_id, &state, resolve.version());
1081 if let Some(prev_pkg_id) = unique_pkg_ids.insert(encodable_pkd_id, pkg_id) {
1082 anyhow::bail!(
1083 "package collision in the lockfile: packages {} and {} are different, \
1084 but only one can be written to lockfile unambiguously",
1085 prev_pkg_id,
1086 pkg_id
1087 )
1088 }
1089 }
1090 Ok(())
1091 }
1092