// possible.
// Shared resolver state. It derives `Clone` because it is snapshotted into
// backtrack frames (restored via `frame.context_backup.clone()` further down),
// which is exactly why the clone cost flagged in the TODO matters.
#[derive(Clone)]
struct Context<'a> {
- // TODO: Both this and the map two below are super expensive to clone. We should
- // TODO: Both this and the map below are super expensive to clone. We should
++ // TODO: Both this and the two maps below are super expensive to clone. We should
// switch to persistent hash maps if we can at some point or otherwise
// make these much cheaper to clone in general.
activations: Activations,
}
impl RemainingCandidates {
++ /// Returns the next candidate that can be activated alongside `prev_active`,
++ /// or, once the iterator is exhausted, `Err` with the set of already-activated
++ /// PackageIds that ruled candidates out (i.e. what would have to change for a
++ /// different outcome).
- fn next(&mut self, prev_active: &[Summary], links: &HashSet<String>) -> Option<Candidate> {
- fn next(&mut self, prev_active: &[Summary]) -> Result<Candidate, HashSet<PackageId>> {
++ fn next(&mut self, prev_active: &[Summary], links: &HashSet<String>) -> Result<Candidate, HashSet<PackageId>> {
// Filter the set of candidates based on the previously activated
// versions for this dependency. We can actually use a version if it
// precisely matches an activated version or if it is otherwise
// incompatible with all other activated versions. Note that we
// define "compatible" here in terms of the semver sense where if
// the left-most nonzero digit is the same they're considered
- // compatible.
+ // compatible unless we have a `*-sys` crate (defined by having a
+ // linked attribute) then we can only have one version.
- self.remaining.by_ref().map(|p| p.1).find(|b| {
- prev_active.iter().any(|a| *a == b.summary)
- || (b.summary.links().map(|l| !links.contains(l)).unwrap_or(true)
- && prev_active
- .iter()
- .all(|a| !compatible(a.version(), b.summary.version())))
- })
+ //
+ // When we are done we return the set of previously activated
+ // packages that conflicted with the ones we tried. If any of these change
+ // then we would have considered different candidates.
+ for (_, b) in self.remaining.by_ref() {
++ if b.summary.links().map(|l| links.contains(l)).unwrap_or(false) {
++ // TODO: self.conflicting_prev_active.insert(<the already-activated owner of this links key>.package_id().clone());
++ // (we don't currently track which activated package owns the links key)
++ continue
++ }
+ if let Some(a) = prev_active.iter().find(|a| compatible(a.version(), b.summary.version())) {
+ if *a != b.summary {
+ self.conflicting_prev_active.insert(a.package_id().clone());
+ continue
+ }
+ }
+ return Ok(b);
+ }
+ Err(self.conflicting_prev_active.clone())
}
}
dep.name(), prev_active.len());
let mut candidates = RemainingCandidates {
remaining: RcVecIter::new(Rc::clone(&candidates)),
+ conflicting_prev_active: HashSet::new(),
};
- (candidates.next(prev_active),
- candidates.clone().next(prev_active).is_ok(),
+ conflicting_activations = HashSet::new();
- candidates.clone().next(prev_active, &cx.links).is_some(),
+ (candidates.next(prev_active, &cx.links),
++ candidates.clone().next(prev_active, &cx.links).is_ok(),
candidates)
};
while let Some(mut frame) = backtrack_stack.pop() {
let (next, has_another) = {
let prev_active = frame.context_backup.prev_active(&frame.dep);
- (frame.remaining_candidates.next(prev_active, &frame.context_backup.links),
- frame.remaining_candidates.clone().next(prev_active, &frame.context_backup.links).is_some())
+ (
- frame.remaining_candidates.next(prev_active),
- frame.remaining_candidates.clone().next(prev_active).is_ok(),
++ frame.remaining_candidates.next(prev_active, &frame.context_backup.links),
++ frame.remaining_candidates.clone().next(prev_active, &frame.context_backup.links).is_ok(),
+ )
};
- if let Some(candidate) = next {
+ if frame.context_backup.is_active(parent.package_id())
+ && conflicting_activations
+ .iter()
+ // note: a lot of redundant work in is_active for similar deps
+ .all(|con| frame.context_backup.is_active(con))
+ {
+ continue;
+ }
+ if let Ok(candidate) = next {
*cur = frame.cur;
if has_another {
*cx = frame.context_backup.clone();
.unwrap_or(&[])
}
++ /// True when the exact package `id` (same name, source, and version) is
++ /// already activated: name -> source -> summaries lookup, then an exact
++ /// `package_id` comparison.
- fn is_active(&mut self, id: &PackageId) -> bool {
++ fn is_active(&self, id: &PackageId) -> bool {
+ self.activations.get(id.name())
+ .and_then(|v| v.get(id.source_id()))
+ .map(|v| v.iter().any(|s| s.package_id() == id))
+ .unwrap_or(false)
+ }
+
/// Return all dependencies and the features we want from them.
fn resolve_features<'b>(&mut self,
s: &'b Summary,
("baz", "1.0.1")])));
}
+#[test]
+fn resolving_with_sys_crates() {
+ // This is based on issues/4902
+ // With `l`, a normal library, we get 2 copies so everyone gets the newest compatible.
+ // But for `l-sys`, a library with a links attribute, we make sure there is only one.
+ let reg = registry(vec![
+ pkg!(("l-sys", "0.9.1")),
+ pkg!(("l-sys", "0.10.0")),
+ pkg!(("l", "0.9.1")),
+ pkg!(("l", "0.10.0")),
+ pkg!(("d", "1.0.0") => [dep_req("l-sys", ">=0.8.0, <=0.10.0"), dep_req("l", ">=0.8.0, <=0.10.0")]),
+ pkg!(("r", "1.0.0") => [dep_req("l-sys", "0.9"), dep_req("l", "0.9")]),
+ ]);
+
+ let res = resolve(&pkg_id("root"), vec![
+ dep_req("d", "1"),
+ dep_req("r", "1"),
+ ], &reg).unwrap();
+
+ assert_that(&res, contains(names(&[("root", "1.0.0"),
+ ("d", "1.0.0"),
+ ("r", "1.0.0"),
+ ("l-sys", "0.9.1"),
+ ("l", "0.9.1"),
+ ("l", "0.10.0")])));
+}
+
 #[test]
 fn resolving_with_constrained_sibling_backtrack_parent() {
 // There is no point in considering all of the backtrack_trap{1,2}
 // candidates since they can't change the result of failing to
 // resolve 'constrained'. Cargo should (ideally) skip past them and resume
 // resolution once the activation of the parent, 'bar', is rolled back.
 // Note that the traps are slightly more constrained to make sure they
 // get picked first.
 let mut reglist = vec![
 pkg!(("foo", "1.0.0") => [dep_req("bar", "1.0"),
 dep_req("constrained", "=1.0.0")]),

 pkg!(("bar", "1.0.0") => [dep_req("backtrack_trap1", "1.0.2"),
 dep_req("backtrack_trap2", "1.0.2"),
 dep_req("constrained", "1.0.0")]),
 pkg!(("constrained", "1.0.0")),
 pkg!(("backtrack_trap1", "1.0.0")),
 pkg!(("backtrack_trap2", "1.0.0")),
 ];
 // Bump this to make the test harder - it adds more versions of bar that will
 // fail to resolve, and more versions of the traps to consider.
 const NUM_BARS_AND_TRAPS: usize = 50; // minimum 2
 // Every bar 1.0.x (x >= 1) below needs constrained "1.0.1" (i.e. >= 1.0.1),
 // which conflicts with foo's "=1.0.0" — so only bar 1.0.0 can ever succeed.
 for i in 1..NUM_BARS_AND_TRAPS {
 let vsn = format!("1.0.{}", i);
 reglist.push(pkg!(("bar", vsn.clone()) => [dep_req("backtrack_trap1", "1.0.2"),
 dep_req("backtrack_trap2", "1.0.2"),
 dep_req("constrained", "1.0.1")]));
 reglist.push(pkg!(("backtrack_trap1", vsn.clone())));
 reglist.push(pkg!(("backtrack_trap2", vsn.clone())));
 reglist.push(pkg!(("constrained", vsn.clone())));
 }
 let reg = registry(reglist);

 let res = resolve(&pkg_id("root"), vec![
 dep_req("foo", "1"),
 ], &reg).unwrap();

 assert_that(&res, contains(names(&[("root", "1.0.0"),
 ("foo", "1.0.0"),
 ("bar", "1.0.0"),
 ("constrained", "1.0.0")])));
 }
+
 #[test]
 fn resolving_with_constrained_sibling_backtrack_activation() {
 // It makes sense to resolve most-constrained deps first, but
 // with that logic the backtrack traps here come between the two
 // attempted resolutions of 'constrained'. When backtracking,
 // cargo should skip past them and resume resolution once the
 // number of activations for 'constrained' changes.
 let mut reglist = vec![
 pkg!(("foo", "1.0.0") => [dep_req("bar", "=1.0.0"),
 dep_req("backtrack_trap1", "1.0"),
 dep_req("backtrack_trap2", "1.0"),
 dep_req("constrained", "<=1.0.60")]),
 pkg!(("bar", "1.0.0") => [dep_req("constrained", ">=1.0.60")]),
 ];
 // Bump these to make the test harder, but you'll also need to
 // change the version constraints on `constrained` above. To correctly
 // exercise Cargo, the relationship between the values is:
 // NUM_CONSTRAINED - vsn < NUM_TRAPS < vsn (vsn = the 60 used in the
 // `constrained` requirements above)
 // to make sure the traps are resolved between `constrained`.
 const NUM_TRAPS: usize = 45; // min 1
 const NUM_CONSTRAINED: usize = 100; // min 1
 for i in 0..NUM_TRAPS {
 let vsn = format!("1.0.{}", i);
 reglist.push(pkg!(("backtrack_trap1", vsn.clone())));
 reglist.push(pkg!(("backtrack_trap2", vsn.clone())));
 }
 for i in 0..NUM_CONSTRAINED {
 let vsn = format!("1.0.{}", i);
 reglist.push(pkg!(("constrained", vsn.clone())));
 }
 let reg = registry(reglist);

 let res = resolve(&pkg_id("root"), vec![
 dep_req("foo", "1"),
 ], &reg).unwrap();

 assert_that(&res, contains(names(&[("root", "1.0.0"),
 ("foo", "1.0.0"),
 ("bar", "1.0.0"),
 ("constrained", "1.0.60")])));
 }
+
 #[test]
 fn resolving_with_constrained_sibling_transitive_dep_effects() {
 // When backtracking due to a failed dependency, if Cargo is
 // trying to be clever and skip irrelevant dependencies, care must
 // be taken to not miss the transitive effects of alternatives. E.g.
 // in the right-to-left resolution of the graph below, B may
 // affect whether D is successfully resolved.
 //
 // A
 // / | \
 // B C D
 // | |
 // C D
 let reg = registry(vec![
 pkg!(("A", "1.0.0") => [dep_req("B", "1.0"),
 dep_req("C", "1.0"),
 dep_req("D", "1.0.100")]),

 pkg!(("B", "1.0.0") => [dep_req("C", ">=1.0.0")]),
 pkg!(("B", "1.0.1") => [dep_req("C", ">=1.0.1")]),

 pkg!(("C", "1.0.0") => [dep_req("D", "1.0.0")]),
 pkg!(("C", "1.0.1") => [dep_req("D", ">=1.0.1,<1.0.100")]),
 pkg!(("C", "1.0.2") => [dep_req("D", ">=1.0.2,<1.0.100")]),

 pkg!(("D", "1.0.0")),
 pkg!(("D", "1.0.1")),
 pkg!(("D", "1.0.2")),
 pkg!(("D", "1.0.100")),
 pkg!(("D", "1.0.101")),
 pkg!(("D", "1.0.102")),
 pkg!(("D", "1.0.103")),
 pkg!(("D", "1.0.104")),
 pkg!(("D", "1.0.105")),
 ]);

 let res = resolve(&pkg_id("root"), vec![
 dep_req("A", "1"),
 ], &reg).unwrap();

 // Only B 1.0.0 allows C 1.0.0, and only C 1.0.0's D requirement ("1.0.0",
 // i.e. >= 1.0.0) admits a D compatible with A's D "1.0.100"; the newer Cs
 // cap D below 1.0.100. Hence C 1.0.0 and the newest D, 1.0.105.
 assert_that(&res, contains(names(&[("A", "1.0.0"),
 ("B", "1.0.0"),
 ("C", "1.0.0"),
 ("D", "1.0.105")])));
 }
+
#[test]
fn resolving_but_no_exists() {
let reg = registry(vec![