From: Dirkjan Ochtman Date: Sat, 28 Apr 2018 15:20:27 +0000 (+0200) Subject: Split Context into two types X-Git-Tag: archive/raspbian/0.35.0-2+rpi1~3^2^2^2^2^2^2^2~22^2~1^2~31^2~3 X-Git-Url: https://dgit.raspbian.org/?a=commitdiff_plain;h=c32e395cad0379bb3b34d3ef746f8fc84c3900ea;p=cargo.git Split Context into two types --- diff --git a/src/cargo/core/compiler/compilation.rs b/src/cargo/core/compiler/compilation.rs index 71bddf20f..ece186c9d 100644 --- a/src/cargo/core/compiler/compilation.rs +++ b/src/cargo/core/compiler/compilation.rs @@ -175,7 +175,7 @@ impl<'cfg> Compilation<'cfg> { // When adding new environment variables depending on // crate properties which might require rebuild upon change // consider adding the corresponding properties to the hash - // in Context::target_metadata() + // in BuildContext::target_metadata() cmd.env("CARGO_MANIFEST_DIR", pkg.root()) .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string()) .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string()) diff --git a/src/cargo/core/compiler/context/compilation_files.rs b/src/cargo/core/compiler/context/compilation_files.rs index 9fcacb336..cf3046eec 100644 --- a/src/cargo/core/compiler/context/compilation_files.rs +++ b/src/cargo/core/compiler/context/compilation_files.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use lazycell::LazyCell; -use super::{Context, FileFlavor, Kind, Layout, Unit}; +use super::{BuildContext, Context, FileFlavor, Kind, Layout, Unit}; use core::{TargetKind, Workspace}; use util::{self, CargoResult}; @@ -172,10 +172,10 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { pub(super) fn outputs( &self, unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult>> { self.outputs[unit] - .try_borrow_with(|| self.calc_outputs(unit, cx)) + .try_borrow_with(|| self.calc_outputs(unit, bcx)) .map(Arc::clone) } @@ -230,15 +230,15 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { fn calc_outputs( &self, unit: &Unit<'a>, - cx: 
&Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult>> { let out_dir = self.out_dir(unit); let file_stem = self.file_stem(unit); let link_stem = self.link_stem(unit); let info = if unit.target.for_host() { - &cx.host_info + &bcx.host_info } else { - &cx.target_info + &bcx.target_info }; let mut ret = Vec::new(); @@ -268,7 +268,7 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { crate_type, flavor, unit.target.kind(), - cx.build_config.target_triple(), + bcx.build_config.target_triple(), )?; match file_types { @@ -324,14 +324,14 @@ impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { does not support these crate types", unsupported.join(", "), unit.pkg, - cx.build_config.target_triple() + bcx.build_config.target_triple() ) } bail!( "cannot compile `{}` as the target `{}` does not \ support any of the output crate types", unit.pkg, - cx.build_config.target_triple() + bcx.build_config.target_triple() ); } info!("Target filenames: {:?}", ret); @@ -380,10 +380,11 @@ fn compute_metadata<'a, 'cfg>( // This environment variable should not be relied on! It's // just here for rustbuild. We need a more principled method // doing this eventually. 
+ let bcx = &cx.bcx; let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA"); if !(unit.mode.is_any_test() || unit.mode.is_check()) && (unit.target.is_dylib() || unit.target.is_cdylib() - || (unit.target.is_bin() && cx.build_config.target_triple().starts_with("wasm32-"))) + || (unit.target.is_bin() && bcx.build_config.target_triple().starts_with("wasm32-"))) && unit.pkg.package_id().source_id().is_path() && __cargo_default_lib_metadata.is_err() { @@ -396,7 +397,7 @@ fn compute_metadata<'a, 'cfg>( // to pull crates from anywhere w/o worrying about conflicts unit.pkg .package_id() - .stable_hash(cx.ws.root()) + .stable_hash(bcx.ws.root()) .hash(&mut hasher); // Add package properties which map to environment variables @@ -408,7 +409,7 @@ fn compute_metadata<'a, 'cfg>( // Also mix in enabled features to our metadata. This'll ensure that // when changing feature sets each lib is separately cached. - cx.resolve + bcx.resolve .features_sorted(unit.pkg.package_id()) .hash(&mut hasher); @@ -427,7 +428,7 @@ fn compute_metadata<'a, 'cfg>( // settings like debuginfo and whatnot. unit.profile.hash(&mut hasher); unit.mode.hash(&mut hasher); - if let Some(ref args) = cx.extra_args_for(unit) { + if let Some(ref args) = bcx.extra_args_for(unit) { args.hash(&mut hasher); } @@ -441,7 +442,7 @@ fn compute_metadata<'a, 'cfg>( unit.target.name().hash(&mut hasher); unit.target.kind().hash(&mut hasher); - cx.build_config.rustc.verbose_version.hash(&mut hasher); + bcx.build_config.rustc.verbose_version.hash(&mut hasher); // Seed the contents of __CARGO_DEFAULT_LIB_METADATA to the hasher if present. // This should be the release channel, to get a different hash for each channel. 
diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs index 8992cf719..0eda1caf8 100644 --- a/src/cargo/core/compiler/context/mod.rs +++ b/src/cargo/core/compiler/context/mod.rs @@ -71,43 +71,29 @@ pub struct Unit<'a> { } /// The build context, containing all information about a build task -pub struct Context<'a, 'cfg: 'a> { +pub struct BuildContext<'a, 'cfg: 'a> { /// The workspace the build is for pub ws: &'a Workspace<'cfg>, /// The cargo configuration pub config: &'cfg Config, /// The dependency graph for our build pub resolve: &'a Resolve, - /// Information on the compilation output - pub compilation: Compilation<'cfg>, - pub packages: &'a PackageSet<'cfg>, - pub build_state: Arc, - pub build_script_overridden: HashSet<(PackageId, Kind)>, - pub build_explicit_deps: HashMap, BuildDeps>, - pub fingerprints: HashMap, Arc>, - pub compiled: HashSet>, - pub build_config: &'a BuildConfig, - pub build_scripts: HashMap, Arc>, - pub links: Links<'a>, - pub used_in_plugin: HashSet>, - pub jobserver: Client, pub profiles: &'a Profiles, + pub build_config: &'a BuildConfig, /// This is a workaround to carry the extra compiler args for either /// `rustc` or `rustdoc` given on the command-line for the commands `cargo /// rustc` and `cargo rustdoc`. These commands only support one target, /// but we don't want the args passed to any dependencies, so we include /// the `Unit` corresponding to the top-level target. 
extra_compiler_args: Option<(Unit<'a>, Vec)>, + pub packages: &'a PackageSet<'cfg>, target_info: TargetInfo, host_info: TargetInfo, incremental_env: Option, - - unit_dependencies: HashMap, Vec>>, - files: Option>, } -impl<'a, 'cfg> Context<'a, 'cfg> { +impl<'a, 'cfg> BuildContext<'a, 'cfg> { pub fn new( ws: &'a Workspace<'cfg>, resolve: &'a Resolve, @@ -116,63 +102,205 @@ impl<'a, 'cfg> Context<'a, 'cfg> { build_config: &'a BuildConfig, profiles: &'a Profiles, extra_compiler_args: Option<(Unit<'a>, Vec)>, - ) -> CargoResult> { + ) -> CargoResult> { let incremental_env = match env::var("CARGO_INCREMENTAL") { Ok(v) => Some(v == "1"), Err(_) => None, }; - // Load up the jobserver that we'll use to manage our parallelism. This - // is the same as the GNU make implementation of a jobserver, and - // intentionally so! It's hoped that we can interact with GNU make and - // all share the same jobserver. - // - // Note that if we don't have a jobserver in our environment then we - // create our own, and we create it with `n-1` tokens because one token - // is ourself, a running process. 
- let jobserver = match config.jobserver_from_env() { - Some(c) => c.clone(), - None => Client::new(build_config.jobs as usize - 1) - .chain_err(|| "failed to create jobserver")?, - }; - let (host_info, target_info) = { - let _p = profile::start("Context::probe_target_info"); + let _p = profile::start("BuildContext::probe_target_info"); debug!("probe_target_info"); let host_info = TargetInfo::new(config, &build_config, Kind::Host)?; let target_info = TargetInfo::new(config, &build_config, Kind::Target)?; (host_info, target_info) }; - let mut cx = Context { + Ok(BuildContext { ws, resolve, packages, config, target_info, host_info, - compilation: Compilation::new(config, build_config.rustc.process()), - build_state: Arc::new(BuildState::new(&build_config)), build_config, - fingerprints: HashMap::new(), profiles, + incremental_env, + extra_compiler_args, + }) + } + + pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult { + let deps = { + let a = unit.pkg.package_id(); + let b = dep.pkg.package_id(); + if a == b { + &[] + } else { + self.resolve.dependencies_listed(a, b) + } + }; + + let crate_name = dep.target.crate_name(); + let mut names = deps.iter() + .map(|d| d.rename().unwrap_or(&crate_name)); + let name = names.next().unwrap_or(&crate_name); + for n in names { + if n == name { + continue + } + bail!("multiple dependencies listed for the same crate must \ + all have the same name, but the dependency on `{}` \ + is listed as having different names", dep.pkg.package_id()); + } + Ok(name.to_string()) + } + + /// Whether a dependency should be compiled for the host or target platform, + /// specified by `Kind`. + fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool { + // If this dependency is only available for certain platforms, + // make sure we're only enabling it for that platform. 
+ let platform = match dep.platform() { + Some(p) => p, + None => return true, + }; + let (name, info) = match kind { + Kind::Host => (self.build_config.host_triple(), &self.host_info), + Kind::Target => (self.build_config.target_triple(), &self.target_info), + }; + platform.matches(name, info.cfg()) + } + + /// Gets a package for the given package id. + pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> { + self.packages.get(id) + } + + /// Get the user-specified linker for a particular host or target + pub fn linker(&self, kind: Kind) -> Option<&Path> { + self.target_config(kind).linker.as_ref().map(|s| s.as_ref()) + } + + /// Get the user-specified `ar` program for a particular host or target + pub fn ar(&self, kind: Kind) -> Option<&Path> { + self.target_config(kind).ar.as_ref().map(|s| s.as_ref()) + } + + /// Get the list of cfg printed out from the compiler for the specified kind + pub fn cfg(&self, kind: Kind) -> &[Cfg] { + let info = match kind { + Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + }; + info.cfg().unwrap_or(&[]) + } + + /// Get the target configuration for a particular host or target + fn target_config(&self, kind: Kind) -> &TargetConfig { + match kind { + Kind::Host => &self.build_config.host, + Kind::Target => &self.build_config.target, + } + } + + /// Number of jobs specified for this build + pub fn jobs(&self) -> u32 { + self.build_config.jobs + } + + pub fn rustflags_args(&self, unit: &Unit) -> CargoResult> { + env_args( + self.config, + &self.build_config, + self.info(&unit.kind).cfg(), + unit.kind, + "RUSTFLAGS", + ) + } + + pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult> { + env_args( + self.config, + &self.build_config, + self.info(&unit.kind).cfg(), + unit.kind, + "RUSTDOCFLAGS", + ) + } + + pub fn show_warnings(&self, pkg: &PackageId) -> bool { + pkg.source_id().is_path() || self.config.extra_verbose() + } + + fn info(&self, kind: &Kind) -> &TargetInfo { + match *kind { + 
Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + } + } + + pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec> { + if let Some((ref args_unit, ref args)) = self.extra_compiler_args { + if args_unit == unit { + return Some(args); + } + } + None + } +} + +pub struct Context<'a, 'cfg: 'a> { + pub bcx: &'a BuildContext<'a, 'cfg>, + pub compilation: Compilation<'cfg>, + pub build_state: Arc, + pub build_script_overridden: HashSet<(PackageId, Kind)>, + pub build_explicit_deps: HashMap, BuildDeps>, + pub fingerprints: HashMap, Arc>, + pub compiled: HashSet>, + pub build_scripts: HashMap, Arc>, + pub links: Links<'a>, + pub used_in_plugin: HashSet>, + pub jobserver: Client, + unit_dependencies: HashMap, Vec>>, + files: Option>, +} + +impl<'a, 'cfg> Context<'a, 'cfg> { + pub fn new(config: &'cfg Config, bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult { + // Load up the jobserver that we'll use to manage our parallelism. This + // is the same as the GNU make implementation of a jobserver, and + // intentionally so! It's hoped that we can interact with GNU make and + // all share the same jobserver. + // + // Note that if we don't have a jobserver in our environment then we + // create our own, and we create it with `n-1` tokens because one token + // is ourself, a running process. 
+ let jobserver = match config.jobserver_from_env() { + Some(c) => c.clone(), + None => Client::new(bcx.build_config.jobs as usize - 1) + .chain_err(|| "failed to create jobserver")?, + }; + + let mut compilation = Compilation::new(config, bcx.build_config.rustc.process()); + compilation.host_dylib_path = bcx.host_info.sysroot_libdir.clone(); + compilation.target_dylib_path = bcx.target_info.sysroot_libdir.clone(); + Ok(Self { + bcx, + compilation, + build_state: Arc::new(BuildState::new(&bcx.build_config)), + fingerprints: HashMap::new(), compiled: HashSet::new(), build_scripts: HashMap::new(), build_explicit_deps: HashMap::new(), links: Links::new(), used_in_plugin: HashSet::new(), - incremental_env, jobserver, build_script_overridden: HashSet::new(), unit_dependencies: HashMap::new(), files: None, - extra_compiler_args, - }; - - cx.compilation.host_dylib_path = cx.host_info.sysroot_libdir.clone(); - cx.compilation.target_dylib_path = cx.target_info.sysroot_libdir.clone(); - Ok(cx) + }) } // Returns a mapping of the root package plus its immediate dependencies to @@ -183,7 +311,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> { export_dir: Option, exec: &Arc, ) -> CargoResult> { - let mut queue = JobQueue::new(&self); + let mut queue = JobQueue::new(self.bcx); self.prepare_units(export_dir, units)?; self.prepare()?; self.build_used_in_plugin_map(units)?; @@ -264,7 +392,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> { ); } - let feats = self.resolve.features(unit.pkg.package_id()); + let feats = self.bcx.resolve.features(unit.pkg.package_id()); if !feats.is_empty() { self.compilation .cfgs @@ -276,7 +404,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> { .collect() }); } - let rustdocflags = self.rustdocflags_args(unit)?; + let rustdocflags = self.bcx.rustdocflags_args(unit)?; if !rustdocflags.is_empty() { self.compilation .rustdocflags @@ -304,8 +432,8 @@ impl<'a, 'cfg> Context<'a, 'cfg> { self.compilation.native_dirs.insert(dir.clone()); } } - self.compilation.host = 
self.build_config.host_triple().to_string(); - self.compilation.target = self.build_config.target_triple().to_string(); + self.compilation.host = self.bcx.build_config.host_triple().to_string(); + self.compilation.target = self.bcx.build_config.target_triple().to_string(); Ok(self.compilation) } @@ -314,21 +442,27 @@ impl<'a, 'cfg> Context<'a, 'cfg> { export_dir: Option, units: &[Unit<'a>], ) -> CargoResult<()> { - let dest = if self.build_config.release { + let dest = if self.bcx.build_config.release { "release" } else { "debug" }; - let host_layout = Layout::new(self.ws, None, dest)?; - let target_layout = match self.build_config.requested_target.as_ref() { - Some(target) => Some(Layout::new(self.ws, Some(target), dest)?), + let host_layout = Layout::new(self.bcx.ws, None, dest)?; + let target_layout = match self.bcx.build_config.requested_target.as_ref() { + Some(target) => Some(Layout::new(self.bcx.ws, Some(target), dest)?), None => None, }; - let deps = build_unit_dependencies(units, self)?; + let deps = build_unit_dependencies(units, self.bcx)?; self.unit_dependencies = deps; - let files = - CompilationFiles::new(units, host_layout, target_layout, export_dir, self.ws, self); + let files = CompilationFiles::new( + units, + host_layout, + target_layout, + export_dir, + self.bcx.ws, + self, + ); self.files = Some(files); Ok(()) } @@ -403,7 +537,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> { /// - link_dst: Optional file to link/copy the result to (without metadata suffix) /// - linkable: Whether possible to link against file (eg it's a library) pub fn outputs(&mut self, unit: &Unit<'a>) -> CargoResult>> { - self.files.as_ref().unwrap().outputs(unit, self) + self.files.as_ref().unwrap().outputs(unit, self.bcx) } /// For a package, return all targets which are registered as dependencies @@ -429,85 +563,6 @@ impl<'a, 'cfg> Context<'a, 'cfg> { self.unit_dependencies[unit].clone() } - pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult { - let 
deps = { - let a = unit.pkg.package_id(); - let b = dep.pkg.package_id(); - if a == b { - &[] - } else { - self.resolve.dependencies_listed(a, b) - } - }; - - let crate_name = dep.target.crate_name(); - let mut names = deps.iter() - .map(|d| d.rename().unwrap_or(&crate_name)); - let name = names.next().unwrap_or(&crate_name); - for n in names { - if n == name { - continue - } - bail!("multiple dependencies listed for the same crate must \ - all have the same name, but the dependency on `{}` \ - is listed as having different names", dep.pkg.package_id()); - } - Ok(name.to_string()) - } - - /// Whether a dependency should be compiled for the host or target platform, - /// specified by `Kind`. - fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool { - // If this dependency is only available for certain platforms, - // make sure we're only enabling it for that platform. - let platform = match dep.platform() { - Some(p) => p, - None => return true, - }; - let (name, info) = match kind { - Kind::Host => (self.build_config.host_triple(), &self.host_info), - Kind::Target => (self.build_config.target_triple(), &self.target_info), - }; - platform.matches(name, info.cfg()) - } - - /// Gets a package for the given package id. 
- pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> { - self.packages.get(id) - } - - /// Get the user-specified linker for a particular host or target - pub fn linker(&self, kind: Kind) -> Option<&Path> { - self.target_config(kind).linker.as_ref().map(|s| s.as_ref()) - } - - /// Get the user-specified `ar` program for a particular host or target - pub fn ar(&self, kind: Kind) -> Option<&Path> { - self.target_config(kind).ar.as_ref().map(|s| s.as_ref()) - } - - /// Get the list of cfg printed out from the compiler for the specified kind - pub fn cfg(&self, kind: Kind) -> &[Cfg] { - let info = match kind { - Kind::Host => &self.host_info, - Kind::Target => &self.target_info, - }; - info.cfg().unwrap_or(&[]) - } - - /// Get the target configuration for a particular host or target - fn target_config(&self, kind: Kind) -> &TargetConfig { - match kind { - Kind::Host => &self.build_config.host, - Kind::Target => &self.build_config.target, - } - } - - /// Number of jobs specified for this build - pub fn jobs(&self) -> u32 { - self.build_config.jobs - } - pub fn incremental_args(&self, unit: &Unit) -> CargoResult> { // There's a number of ways to configure incremental compilation right // now. In order of descending priority (first is highest priority) we @@ -530,8 +585,15 @@ impl<'a, 'cfg> Context<'a, 'cfg> { // incremental compilation or not. Primarily development profiles // have it enabled by default while release profiles have it disabled // by default. - let global_cfg = self.config.get_bool("build.incremental")?.map(|c| c.val); - let incremental = match (self.incremental_env, global_cfg, unit.profile.incremental) { + let global_cfg = self.bcx + .config + .get_bool("build.incremental")? 
+ .map(|c| c.val); + let incremental = match ( + self.bcx.incremental_env, + global_cfg, + unit.profile.incremental, + ) { (Some(v), _, _) => v, (None, Some(false), _) => false, (None, _, other) => other, @@ -554,46 +616,6 @@ impl<'a, 'cfg> Context<'a, 'cfg> { let dir = self.files().layout(unit.kind).incremental().display(); Ok(vec!["-C".to_string(), format!("incremental={}", dir)]) } - - pub fn rustflags_args(&self, unit: &Unit) -> CargoResult> { - env_args( - self.config, - &self.build_config, - self.info(&unit.kind).cfg(), - unit.kind, - "RUSTFLAGS", - ) - } - - pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult> { - env_args( - self.config, - &self.build_config, - self.info(&unit.kind).cfg(), - unit.kind, - "RUSTDOCFLAGS", - ) - } - - pub fn show_warnings(&self, pkg: &PackageId) -> bool { - pkg.source_id().is_path() || self.config.extra_verbose() - } - - fn info(&self, kind: &Kind) -> &TargetInfo { - match *kind { - Kind::Host => &self.host_info, - Kind::Target => &self.target_info, - } - } - - pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec> { - if let Some((ref args_unit, ref args)) = self.extra_compiler_args { - if args_unit == unit { - return Some(args); - } - } - None - } } /// Acquire extra flags to pass to the compiler from various locations. diff --git a/src/cargo/core/compiler/context/unit_dependencies.rs b/src/cargo/core/compiler/context/unit_dependencies.rs index 84ee755b7..08d7ed410 100644 --- a/src/cargo/core/compiler/context/unit_dependencies.rs +++ b/src/cargo/core/compiler/context/unit_dependencies.rs @@ -15,7 +15,7 @@ //! (for example, with and without tests), so we actually build a dependency //! graph of `Unit`s, which capture these properties. 
-use super::{Context, Kind, Unit}; +use super::{BuildContext, Kind, Unit}; use core::dependency::Kind as DepKind; use core::profiles::ProfileFor; use core::{Package, Target}; @@ -25,7 +25,7 @@ use CargoResult; pub fn build_unit_dependencies<'a, 'cfg>( roots: &[Unit<'a>], - cx: &Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult, Vec>>> { let mut deps = HashMap::new(); for unit in roots.iter() { @@ -35,12 +35,12 @@ pub fn build_unit_dependencies<'a, 'cfg>( // cleared, and avoid building the lib thrice (once with `panic`, once // without, once for --test). In particular, the lib included for // doctests and examples are `Build` mode here. - let profile_for = if unit.mode.is_any_test() || cx.build_config.test { + let profile_for = if unit.mode.is_any_test() || bcx.build_config.test { ProfileFor::TestDependency } else { ProfileFor::Any }; - deps_of(unit, cx, &mut deps, profile_for)?; + deps_of(unit, bcx, &mut deps, profile_for)?; } Ok(deps) @@ -48,7 +48,7 @@ pub fn build_unit_dependencies<'a, 'cfg>( fn deps_of<'a, 'b, 'cfg>( unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, deps: &'b mut HashMap, Vec>>, profile_for: ProfileFor, ) -> CargoResult<&'b [Unit<'a>]> { @@ -59,11 +59,11 @@ fn deps_of<'a, 'b, 'cfg>( // requested unit's settings are the same as `Any`, `CustomBuild` can't // affect anything else in the hierarchy. if !deps.contains_key(unit) { - let unit_deps = compute_deps(unit, cx, deps, profile_for)?; + let unit_deps = compute_deps(unit, bcx, deps, profile_for)?; let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect(); deps.insert(*unit, to_insert); for (unit, profile_for) in unit_deps { - deps_of(&unit, cx, deps, profile_for)?; + deps_of(&unit, bcx, deps, profile_for)?; } } Ok(deps[unit].as_ref()) @@ -75,19 +75,19 @@ fn deps_of<'a, 'b, 'cfg>( /// is the profile type that should be used for dependencies of the unit. 
fn compute_deps<'a, 'b, 'cfg>( unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, deps: &'b mut HashMap, Vec>>, profile_for: ProfileFor, ) -> CargoResult, ProfileFor)>> { if unit.mode.is_run_custom_build() { - return compute_deps_custom_build(unit, cx, deps); + return compute_deps_custom_build(unit, bcx, deps); } else if unit.mode.is_doc() && !unit.mode.is_any_test() { // Note: This does not include Doctest. - return compute_deps_doc(unit, cx); + return compute_deps_doc(unit, bcx); } let id = unit.pkg.package_id(); - let deps = cx.resolve.deps(id); + let deps = bcx.resolve.deps(id); let mut ret = deps.filter(|&(_id, deps)| { assert!(deps.len() > 0); deps.iter().any(|dep| { @@ -108,13 +108,13 @@ fn compute_deps<'a, 'b, 'cfg>( // If this dependency is only available for certain platforms, // make sure we're only enabling it for that platform. - if !cx.dep_platform_activated(dep, unit.kind) { + if !bcx.dep_platform_activated(dep, unit.kind) { return false; } // If the dependency is optional, then we're only activating it // if the corresponding feature was activated - if dep.is_optional() && !cx.resolve.features(id).contains(&*dep.name()) { + if dep.is_optional() && !bcx.resolve.features(id).contains(&*dep.name()) { return false; } @@ -122,10 +122,10 @@ fn compute_deps<'a, 'b, 'cfg>( // actually used! 
true }) - }).filter_map(|(id, _)| match cx.get_package(id) { + }).filter_map(|(id, _)| match bcx.get_package(id) { Ok(pkg) => pkg.targets().iter().find(|t| t.is_lib()).map(|t| { let mode = check_or_build_mode(&unit.mode, t); - let unit = new_unit(cx, pkg, t, profile_for, unit.kind.for_target(t), mode); + let unit = new_unit(bcx, pkg, t, profile_for, unit.kind.for_target(t), mode); Ok((unit, profile_for)) }), Err(e) => Some(Err(e)), @@ -138,7 +138,7 @@ fn compute_deps<'a, 'b, 'cfg>( if unit.target.is_custom_build() { return Ok(ret); } - ret.extend(dep_build_script(unit, cx)); + ret.extend(dep_build_script(unit, bcx)); // If this target is a binary, test, example, etc, then it depends on // the library of the same package. The call to `resolve.deps` above @@ -147,7 +147,7 @@ fn compute_deps<'a, 'b, 'cfg>( if unit.target.is_lib() && unit.mode != CompileMode::Doctest { return Ok(ret); } - ret.extend(maybe_lib(unit, cx, profile_for)); + ret.extend(maybe_lib(unit, bcx, profile_for)); Ok(ret) } @@ -158,7 +158,7 @@ fn compute_deps<'a, 'b, 'cfg>( /// the returned set of units must all be run before `unit` is run. 
fn compute_deps_custom_build<'a, 'cfg>( unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, deps: &mut HashMap, Vec>>, ) -> CargoResult, ProfileFor)>> { // When not overridden, then the dependencies to run a build script are: @@ -178,17 +178,17 @@ fn compute_deps_custom_build<'a, 'cfg>( kind: unit.kind, mode: CompileMode::Build, }; - let deps = deps_of(&tmp, cx, deps, ProfileFor::Any)?; + let deps = deps_of(&tmp, bcx, deps, ProfileFor::Any)?; Ok(deps.iter() .filter_map(|unit| { if !unit.target.linkable() || unit.pkg.manifest().links().is_none() { return None; } - dep_build_script(unit, cx) + dep_build_script(unit, bcx) }) .chain(Some(( new_unit( - cx, + bcx, unit.pkg, unit.target, ProfileFor::CustomBuild, @@ -205,17 +205,17 @@ fn compute_deps_custom_build<'a, 'cfg>( /// Returns the dependencies necessary to document a package fn compute_deps_doc<'a, 'cfg>( unit: &Unit<'a>, - cx: &Context<'a, 'cfg>, + bcx: &BuildContext<'a, 'cfg>, ) -> CargoResult, ProfileFor)>> { - let deps = cx.resolve + let deps = bcx.resolve .deps(unit.pkg.package_id()) .filter(|&(_id, deps)| { deps.iter().any(|dep| match dep.kind() { - DepKind::Normal => cx.dep_platform_activated(dep, unit.kind), + DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind), _ => false, }) }) - .map(|(id, _deps)| cx.get_package(id)); + .map(|(id, _deps)| bcx.get_package(id)); // To document a library, we depend on dependencies actually being // built. If we're documenting *all* libraries, then we also depend on @@ -231,7 +231,7 @@ fn compute_deps_doc<'a, 'cfg>( // However, for plugins/proc-macros, deps should be built like normal. let mode = check_or_build_mode(&unit.mode, lib); let lib_unit = new_unit( - cx, + bcx, dep, lib, ProfileFor::Any, @@ -242,7 +242,7 @@ fn compute_deps_doc<'a, 'cfg>( if let CompileMode::Doc { deps: true } = unit.mode { // Document this lib as well. 
let doc_unit = new_unit( - cx, + bcx, dep, lib, ProfileFor::Any, @@ -254,23 +254,23 @@ fn compute_deps_doc<'a, 'cfg>( } // Be sure to build/run the build script for documented libraries as - ret.extend(dep_build_script(unit, cx)); + ret.extend(dep_build_script(unit, bcx)); // If we document a binary, we need the library available if unit.target.is_bin() { - ret.extend(maybe_lib(unit, cx, ProfileFor::Any)); + ret.extend(maybe_lib(unit, bcx, ProfileFor::Any)); } Ok(ret) } fn maybe_lib<'a>( unit: &Unit<'a>, - cx: &Context, + bcx: &BuildContext, profile_for: ProfileFor, ) -> Option<(Unit<'a>, ProfileFor)> { let mode = check_or_build_mode(&unit.mode, unit.target); unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| { - let unit = new_unit(cx, unit.pkg, t, profile_for, unit.kind.for_target(t), mode); + let unit = new_unit(bcx, unit.pkg, t, profile_for, unit.kind.for_target(t), mode); (unit, profile_for) }) } @@ -282,7 +282,7 @@ fn maybe_lib<'a>( /// script itself doesn't have any dependencies, so even in that case a unit /// of work is still returned. `None` is only returned if the package has no /// build script. 
-fn dep_build_script<'a>(unit: &Unit<'a>, cx: &Context) -> Option<(Unit<'a>, ProfileFor)> { +fn dep_build_script<'a>(unit: &Unit<'a>, bcx: &BuildContext) -> Option<(Unit<'a>, ProfileFor)> { unit.pkg .targets() .iter() @@ -294,7 +294,7 @@ fn dep_build_script<'a>(unit: &Unit<'a>, cx: &Context) -> Option<(Unit<'a>, Prof Unit { pkg: unit.pkg, target: t, - profile: cx.profiles.get_profile_run_custom_build(&unit.profile), + profile: bcx.profiles.get_profile_run_custom_build(&unit.profile), kind: unit.kind, mode: CompileMode::RunCustomBuild, }, @@ -322,19 +322,19 @@ fn check_or_build_mode(mode: &CompileMode, target: &Target) -> CompileMode { } fn new_unit<'a>( - cx: &Context, + bcx: &BuildContext, pkg: &'a Package, target: &'a Target, profile_for: ProfileFor, kind: Kind, mode: CompileMode, ) -> Unit<'a> { - let profile = cx.profiles.get_profile( + let profile = bcx.profiles.get_profile( &pkg.name(), - cx.ws.is_member(pkg), + bcx.ws.is_member(pkg), profile_for, mode, - cx.build_config.release, + bcx.build_config.release, ); Unit { pkg, diff --git a/src/cargo/core/compiler/custom_build.rs b/src/cargo/core/compiler/custom_build.rs index 59cb9b81a..b23b1256d 100644 --- a/src/cargo/core/compiler/custom_build.rs +++ b/src/cargo/core/compiler/custom_build.rs @@ -103,6 +103,7 @@ pub fn prepare<'a, 'cfg>( fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> { assert!(unit.mode.is_run_custom_build()); + let bcx = &cx.bcx; let dependencies = cx.dep_targets(unit); let build_script_unit = dependencies .iter() @@ -126,30 +127,30 @@ fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoRes let debug = unit.profile.debuginfo.unwrap_or(0) != 0; cmd.env("OUT_DIR", &build_output) .env("CARGO_MANIFEST_DIR", unit.pkg.root()) - .env("NUM_JOBS", &cx.jobs().to_string()) + .env("NUM_JOBS", &bcx.jobs().to_string()) .env( "TARGET", &match unit.kind { - Kind::Host => &cx.build_config.host_triple(), - Kind::Target => 
cx.build_config.target_triple(), + Kind::Host => &bcx.build_config.host_triple(), + Kind::Target => bcx.build_config.target_triple(), }, ) .env("DEBUG", debug.to_string()) .env("OPT_LEVEL", &unit.profile.opt_level.to_string()) .env( "PROFILE", - if cx.build_config.release { + if bcx.build_config.release { "release" } else { "debug" }, ) - .env("HOST", &cx.build_config.host_triple()) - .env("RUSTC", &cx.build_config.rustc.path) - .env("RUSTDOC", &*cx.config.rustdoc()?) + .env("HOST", &bcx.build_config.host_triple()) + .env("RUSTC", &bcx.build_config.rustc.path) + .env("RUSTDOC", &*bcx.config.rustdoc()?) .inherit_jobserver(&cx.jobserver); - if let Some(ref linker) = cx.build_config.target.linker { + if let Some(ref linker) = bcx.build_config.target.linker { cmd.env("RUSTC_LINKER", linker); } @@ -159,12 +160,12 @@ fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoRes // Be sure to pass along all enabled features for this package, this is the // last piece of statically known information that we have. 
- for feat in cx.resolve.features(unit.pkg.package_id()).iter() { + for feat in bcx.resolve.features(unit.pkg.package_id()).iter() { cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1"); } let mut cfg_map = HashMap::new(); - for cfg in cx.cfg(unit.kind) { + for cfg in bcx.cfg(unit.kind) { match *cfg { Cfg::Name(ref n) => { cfg_map.insert(n.clone(), None); @@ -230,7 +231,7 @@ fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoRes ); let build_scripts = super::load_build_deps(cx, unit); let kind = unit.kind; - let json_messages = cx.build_config.json_messages; + let json_messages = bcx.build_config.json_messages; // Check to see if the build script has already run, and if it has keep // track of whether it has told us about some explicit dependencies diff --git a/src/cargo/core/compiler/fingerprint.rs b/src/cargo/core/compiler/fingerprint.rs index 73bd44bf3..6eba51405 100644 --- a/src/cargo/core/compiler/fingerprint.rs +++ b/src/cargo/core/compiler/fingerprint.rs @@ -15,7 +15,7 @@ use util::errors::{CargoResult, CargoResultExt}; use util::paths; use util::{internal, profile, Dirty, Fresh, Freshness}; -use super::context::{Context, FileFlavor, Unit}; +use super::context::{BuildContext, Context, FileFlavor, Unit}; use super::custom_build::BuildDeps; use super::job::Work; @@ -56,6 +56,7 @@ pub fn prepare_target<'a, 'cfg>( unit.pkg.package_id(), unit.target.name() )); + let bcx = cx.bcx; let new = cx.files().fingerprint_dir(unit); let loc = new.join(&filename(cx, unit)); @@ -77,7 +78,7 @@ pub fn prepare_target<'a, 'cfg>( // changed then an error is issued. 
if compare.is_err() { let source_id = unit.pkg.package_id().source_id(); - let sources = cx.packages.sources(); + let sources = bcx.packages.sources(); let source = sources .get(source_id) .ok_or_else(|| internal("missing package source"))?; @@ -102,7 +103,7 @@ pub fn prepare_target<'a, 'cfg>( } } - let allow_failure = cx.extra_args_for(unit).is_some(); + let allow_failure = bcx.extra_args_for(unit).is_some(); let target_root = cx.files().target_root().to_path_buf(); let write_fingerprint = Work::new(move |_| { match fingerprint.update_local(&target_root) { @@ -414,6 +415,7 @@ fn calculate<'a, 'cfg>( cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>, ) -> CargoResult<Arc<Fingerprint>> { + let bcx = cx.bcx; if let Some(s) = cx.fingerprints.get(unit) { return Ok(Arc::clone(s)); } @@ -430,7 +432,7 @@ fn calculate<'a, 'cfg>( .filter(|u| !u.target.is_custom_build() && !u.target.is_bin()) .map(|dep| { calculate(cx, dep).and_then(|fingerprint| { - let name = cx.extern_crate_name(unit, dep)?; + let name = cx.bcx.extern_crate_name(unit, dep)?; Ok((dep.pkg.package_id().to_string(), name, fingerprint)) }) }) @@ -442,30 +444,30 @@ fn calculate<'a, 'cfg>( let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?; LocalFingerprint::mtime(cx.files().target_root(), mtime, &dep_info) } else { - let fingerprint = pkg_fingerprint(cx, unit.pkg)?; + let fingerprint = pkg_fingerprint(&cx.bcx, unit.pkg)?; LocalFingerprint::Precalculated(fingerprint) }; let mut deps = deps; deps.sort_by(|&(ref a, _, _), &(ref b, _, _)| a.cmp(b)); let extra_flags = if unit.mode.is_doc() { - cx.rustdocflags_args(unit)? + bcx.rustdocflags_args(unit)? } else { - cx.rustflags_args(unit)? + bcx.rustflags_args(unit)? 
}; let profile_hash = util::hash_u64(&( &unit.profile, unit.mode, - cx.extra_args_for(unit), + bcx.extra_args_for(unit), cx.incremental_args(unit)?, )); let fingerprint = Arc::new(Fingerprint { - rustc: util::hash_u64(&cx.build_config.rustc.verbose_version), + rustc: util::hash_u64(&bcx.build_config.rustc.verbose_version), target: util::hash_u64(&unit.target), profile: profile_hash, // Note that .0 is hashed here, not .1 which is the cwd. That doesn't // actually affect the output artifact so there's no need to hash it. - path: util::hash_u64(&super::path_args(cx, unit).0), - features: format!("{:?}", cx.resolve.features_sorted(unit.pkg.package_id())), + path: util::hash_u64(&super::path_args(&cx.bcx, unit).0), + features: format!("{:?}", bcx.resolve.features_sorted(unit.pkg.package_id())), deps, local: vec![local], memoized_hash: Mutex::new(None), @@ -591,7 +593,7 @@ fn build_script_local_fingerprints<'a, 'cfg>( let output = deps.build_script_output.clone(); if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() { debug!("old local fingerprints deps"); - let s = pkg_fingerprint(cx, unit.pkg)?; + let s = pkg_fingerprint(&cx.bcx, unit.pkg)?; return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output))); } @@ -705,9 +707,9 @@ fn dep_info_mtime_if_fresh(pkg: &Package, dep_info: &Path) -> CargoResult