// When adding new environment variables that depend on
// crate properties and might require a rebuild upon change,
// consider adding the corresponding properties to the hash
- // in Context::target_metadata()
+ // in BuildContext::target_metadata()
cmd.env("CARGO_MANIFEST_DIR", pkg.root())
.env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string())
.env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string())
use lazycell::LazyCell;
-use super::{Context, FileFlavor, Kind, Layout, Unit};
+use super::{BuildContext, Context, FileFlavor, Kind, Layout, Unit};
use core::{TargetKind, Workspace};
use util::{self, CargoResult};
pub(super) fn outputs(
&self,
unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
) -> CargoResult<Arc<Vec<OutputFile>>> {
self.outputs[unit]
- .try_borrow_with(|| self.calc_outputs(unit, cx))
+ .try_borrow_with(|| self.calc_outputs(unit, bcx))
.map(Arc::clone)
}
fn calc_outputs(
&self,
unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
) -> CargoResult<Arc<Vec<OutputFile>>> {
let out_dir = self.out_dir(unit);
let file_stem = self.file_stem(unit);
let link_stem = self.link_stem(unit);
let info = if unit.target.for_host() {
- &cx.host_info
+ &bcx.host_info
} else {
- &cx.target_info
+ &bcx.target_info
};
let mut ret = Vec::new();
crate_type,
flavor,
unit.target.kind(),
- cx.build_config.target_triple(),
+ bcx.build_config.target_triple(),
)?;
match file_types {
does not support these crate types",
unsupported.join(", "),
unit.pkg,
- cx.build_config.target_triple()
+ bcx.build_config.target_triple()
)
}
bail!(
"cannot compile `{}` as the target `{}` does not \
support any of the output crate types",
unit.pkg,
- cx.build_config.target_triple()
+ bcx.build_config.target_triple()
);
}
info!("Target filenames: {:?}", ret);
// This environment variable should not be relied on! It's
// just here for rustbuild. We need a more principled method
// of doing this eventually.
+ let bcx = &cx.bcx;
let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA");
if !(unit.mode.is_any_test() || unit.mode.is_check())
&& (unit.target.is_dylib() || unit.target.is_cdylib()
- || (unit.target.is_bin() && cx.build_config.target_triple().starts_with("wasm32-")))
+ || (unit.target.is_bin() && bcx.build_config.target_triple().starts_with("wasm32-")))
&& unit.pkg.package_id().source_id().is_path()
&& __cargo_default_lib_metadata.is_err()
{
// to pull crates from anywhere w/o worrying about conflicts
unit.pkg
.package_id()
- .stable_hash(cx.ws.root())
+ .stable_hash(bcx.ws.root())
.hash(&mut hasher);
// Add package properties which map to environment variables
// Also mix in enabled features to our metadata. This'll ensure that
// when changing feature sets each lib is separately cached.
- cx.resolve
+ bcx.resolve
.features_sorted(unit.pkg.package_id())
.hash(&mut hasher);
// settings like debuginfo and whatnot.
unit.profile.hash(&mut hasher);
unit.mode.hash(&mut hasher);
- if let Some(ref args) = cx.extra_args_for(unit) {
+ if let Some(ref args) = bcx.extra_args_for(unit) {
args.hash(&mut hasher);
}
unit.target.name().hash(&mut hasher);
unit.target.kind().hash(&mut hasher);
- cx.build_config.rustc.verbose_version.hash(&mut hasher);
+ bcx.build_config.rustc.verbose_version.hash(&mut hasher);
// Seed the contents of __CARGO_DEFAULT_LIB_METADATA to the hasher if present.
// This should be the release channel, to get a different hash for each channel.
}
/// The build context, containing all information about a build task
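+///
+/// Unlike `Context`, this state is set up before compilation starts and is
+/// only read afterwards, so it can be shared behind a shared reference.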
-pub struct Context<'a, 'cfg: 'a> {
+pub struct BuildContext<'a, 'cfg: 'a> {
/// The workspace the build is for
pub ws: &'a Workspace<'cfg>,
/// The cargo configuration
pub config: &'cfg Config,
/// The dependency graph for our build
pub resolve: &'a Resolve,
- /// Information on the compilation output
- pub compilation: Compilation<'cfg>,
- pub packages: &'a PackageSet<'cfg>,
- pub build_state: Arc<BuildState>,
- pub build_script_overridden: HashSet<(PackageId, Kind)>,
- pub build_explicit_deps: HashMap<Unit<'a>, BuildDeps>,
- pub fingerprints: HashMap<Unit<'a>, Arc<Fingerprint>>,
- pub compiled: HashSet<Unit<'a>>,
- pub build_config: &'a BuildConfig,
- pub build_scripts: HashMap<Unit<'a>, Arc<BuildScripts>>,
- pub links: Links<'a>,
- pub used_in_plugin: HashSet<Unit<'a>>,
- pub jobserver: Client,
pub profiles: &'a Profiles,
+ pub build_config: &'a BuildConfig,
/// This is a workaround to carry the extra compiler args for either
/// `rustc` or `rustdoc` given on the command-line for the commands `cargo
/// rustc` and `cargo rustdoc`. These commands only support one target,
/// but we don't want the args passed to any dependencies, so we include
/// the `Unit` corresponding to the top-level target.
extra_compiler_args: Option<(Unit<'a>, Vec<String>)>,
+ pub packages: &'a PackageSet<'cfg>,
target_info: TargetInfo,
host_info: TargetInfo,
incremental_env: Option<bool>,
-
- unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
- files: Option<CompilationFiles<'a, 'cfg>>,
}
-impl<'a, 'cfg> Context<'a, 'cfg> {
+impl<'a, 'cfg> BuildContext<'a, 'cfg> {
pub fn new(
ws: &'a Workspace<'cfg>,
resolve: &'a Resolve,
packages: &'a PackageSet<'cfg>,
config: &'cfg Config,
build_config: &'a BuildConfig,
profiles: &'a Profiles,
extra_compiler_args: Option<(Unit<'a>, Vec<String>)>,
- ) -> CargoResult<Context<'a, 'cfg>> {
+ ) -> CargoResult<BuildContext<'a, 'cfg>> {
let incremental_env = match env::var("CARGO_INCREMENTAL") {
Ok(v) => Some(v == "1"),
Err(_) => None,
};
- // Load up the jobserver that we'll use to manage our parallelism. This
- // is the same as the GNU make implementation of a jobserver, and
- // intentionally so! It's hoped that we can interact with GNU make and
- // all share the same jobserver.
- //
- // Note that if we don't have a jobserver in our environment then we
- // create our own, and we create it with `n-1` tokens because one token
- // is ourself, a running process.
- let jobserver = match config.jobserver_from_env() {
- Some(c) => c.clone(),
- None => Client::new(build_config.jobs as usize - 1)
- .chain_err(|| "failed to create jobserver")?,
- };
-
let (host_info, target_info) = {
- let _p = profile::start("Context::probe_target_info");
+ let _p = profile::start("BuildContext::probe_target_info");
debug!("probe_target_info");
let host_info = TargetInfo::new(config, &build_config, Kind::Host)?;
let target_info = TargetInfo::new(config, &build_config, Kind::Target)?;
(host_info, target_info)
};
- let mut cx = Context {
+ Ok(BuildContext {
ws,
resolve,
packages,
config,
target_info,
host_info,
- compilation: Compilation::new(config, build_config.rustc.process()),
- build_state: Arc::new(BuildState::new(&build_config)),
build_config,
- fingerprints: HashMap::new(),
profiles,
+ incremental_env,
+ extra_compiler_args,
+ })
+ }
+
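+ /// Returns the name `unit` should use to refer to its dependency `dep`
+ /// on the command line, i.e. the name for `--extern <name>=<path>`. This
+ /// is normally `dep`'s crate name, but a rename declared in the manifest
+ /// takes precedence; if the same crate is listed more than once, every
+ /// listing must agree on the name.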
+ pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult<String> {
+ let deps = {
+ let a = unit.pkg.package_id();
+ let b = dep.pkg.package_id();
+ if a == b {
+ &[]
+ } else {
+ self.resolve.dependencies_listed(a, b)
+ }
+ };
+
+ let crate_name = dep.target.crate_name();
+ let mut names = deps.iter()
+ .map(|d| d.rename().unwrap_or(&crate_name));
+ let name = names.next().unwrap_or(&crate_name);
+ for n in names {
+ if n == name {
+ continue
+ }
+ bail!("multiple dependencies listed for the same crate must \
+ all have the same name, but the dependency on `{}` \
+ is listed as having different names", dep.pkg.package_id());
+ }
+ Ok(name.to_string())
+ }
+
+ /// Whether a dependency should be compiled for the host or target platform,
+ /// specified by `Kind`.
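+ ///
+ /// For example, a dependency declared under
+ /// `[target.'cfg(unix)'.dependencies]` is only activated when the triple
+ /// and `cfg` values of the requested `Kind` match `unix`.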
+ fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
+ // If this dependency is only available for certain platforms,
+ // make sure we're only enabling it for that platform.
+ let platform = match dep.platform() {
+ Some(p) => p,
+ None => return true,
+ };
+ let (name, info) = match kind {
+ Kind::Host => (self.build_config.host_triple(), &self.host_info),
+ Kind::Target => (self.build_config.target_triple(), &self.target_info),
+ };
+ platform.matches(name, info.cfg())
+ }
+
+ /// Gets a package for the given package id.
+ pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
+ self.packages.get(id)
+ }
+
+ /// Get the user-specified linker for a particular host or target
+ pub fn linker(&self, kind: Kind) -> Option<&Path> {
+ self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
+ }
+
+ /// Get the user-specified `ar` program for a particular host or target
+ pub fn ar(&self, kind: Kind) -> Option<&Path> {
+ self.target_config(kind).ar.as_ref().map(|s| s.as_ref())
+ }
+
+ /// Get the list of cfg printed out from the compiler for the specified kind
+ pub fn cfg(&self, kind: Kind) -> &[Cfg] {
+ let info = match kind {
+ Kind::Host => &self.host_info,
+ Kind::Target => &self.target_info,
+ };
+ info.cfg().unwrap_or(&[])
+ }
+
+ /// Get the target configuration for a particular host or target
+ fn target_config(&self, kind: Kind) -> &TargetConfig {
+ match kind {
+ Kind::Host => &self.build_config.host,
+ Kind::Target => &self.build_config.target,
+ }
+ }
+
+ /// Number of jobs specified for this build
+ pub fn jobs(&self) -> u32 {
+ self.build_config.jobs
+ }
+
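+ /// Extra compiler flags to pass to `rustc` for `unit`, drawn from the
+ /// `RUSTFLAGS` environment variable and the Cargo configuration for the
+ /// platform `unit` is built for.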
+ pub fn rustflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+ env_args(
+ self.config,
+ &self.build_config,
+ self.info(&unit.kind).cfg(),
+ unit.kind,
+ "RUSTFLAGS",
+ )
+ }
+
+ pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+ env_args(
+ self.config,
+ &self.build_config,
+ self.info(&unit.kind).cfg(),
+ unit.kind,
+ "RUSTDOCFLAGS",
+ )
+ }
+
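+ /// Whether to show compiler warnings for `pkg`: always for local (path)
+ /// packages, and for everything else only in extra-verbose (`-vv`) mode.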
+ pub fn show_warnings(&self, pkg: &PackageId) -> bool {
+ pkg.source_id().is_path() || self.config.extra_verbose()
+ }
+
+ fn info(&self, kind: &Kind) -> &TargetInfo {
+ match *kind {
+ Kind::Host => &self.host_info,
+ Kind::Target => &self.target_info,
+ }
+ }
+
+ pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec<String>> {
+ if let Some((ref args_unit, ref args)) = self.extra_compiler_args {
+ if args_unit == unit {
+ return Some(args);
+ }
+ }
+ None
+ }
+}
+
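+/// The mutable state kept over the course of one compilation: fingerprints,
+/// build-script outputs, finished units, and so on. It borrows the
+/// read-only `BuildContext` it was created from.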
+pub struct Context<'a, 'cfg: 'a> {
+ pub bcx: &'a BuildContext<'a, 'cfg>,
+ pub compilation: Compilation<'cfg>,
+ pub build_state: Arc<BuildState>,
+ pub build_script_overridden: HashSet<(PackageId, Kind)>,
+ pub build_explicit_deps: HashMap<Unit<'a>, BuildDeps>,
+ pub fingerprints: HashMap<Unit<'a>, Arc<Fingerprint>>,
+ pub compiled: HashSet<Unit<'a>>,
+ pub build_scripts: HashMap<Unit<'a>, Arc<BuildScripts>>,
+ pub links: Links<'a>,
+ pub used_in_plugin: HashSet<Unit<'a>>,
+ pub jobserver: Client,
+ unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
+ files: Option<CompilationFiles<'a, 'cfg>>,
+}
+
+impl<'a, 'cfg> Context<'a, 'cfg> {
+ pub fn new(config: &'cfg Config, bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult<Self> {
+ // Load up the jobserver that we'll use to manage our parallelism. This
+ // is the same as the GNU make implementation of a jobserver, and
+ // intentionally so! It's hoped that we can interact with GNU make and
+ // all share the same jobserver.
+ //
+ // Note that if we don't have a jobserver in our environment then we
+ // create our own, and we create it with `n-1` tokens because one token
+ // is ourself, a running process.
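+ // (So a build invoked with `-j8`, say, creates a jobserver holding 7
+ // tokens; this process itself accounts for the eighth job.)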
+ let jobserver = match config.jobserver_from_env() {
+ Some(c) => c.clone(),
+ None => Client::new(bcx.build_config.jobs as usize - 1)
+ .chain_err(|| "failed to create jobserver")?,
+ };
+
+ let mut compilation = Compilation::new(config, bcx.build_config.rustc.process());
+ compilation.host_dylib_path = bcx.host_info.sysroot_libdir.clone();
+ compilation.target_dylib_path = bcx.target_info.sysroot_libdir.clone();
+ Ok(Self {
+ bcx,
+ compilation,
+ build_state: Arc::new(BuildState::new(&bcx.build_config)),
+ fingerprints: HashMap::new(),
compiled: HashSet::new(),
build_scripts: HashMap::new(),
build_explicit_deps: HashMap::new(),
links: Links::new(),
used_in_plugin: HashSet::new(),
- incremental_env,
jobserver,
build_script_overridden: HashSet::new(),
unit_dependencies: HashMap::new(),
files: None,
- extra_compiler_args,
- };
-
- cx.compilation.host_dylib_path = cx.host_info.sysroot_libdir.clone();
- cx.compilation.target_dylib_path = cx.target_info.sysroot_libdir.clone();
- Ok(cx)
+ })
}
// Returns a mapping of the root package plus its immediate dependencies to
export_dir: Option<PathBuf>,
exec: &Arc<Executor>,
) -> CargoResult<Compilation<'cfg>> {
- let mut queue = JobQueue::new(&self);
+ let mut queue = JobQueue::new(self.bcx);
self.prepare_units(export_dir, units)?;
self.prepare()?;
self.build_used_in_plugin_map(units)?;
);
}
- let feats = self.resolve.features(unit.pkg.package_id());
+ let feats = self.bcx.resolve.features(unit.pkg.package_id());
if !feats.is_empty() {
self.compilation
.cfgs
.collect()
});
}
- let rustdocflags = self.rustdocflags_args(unit)?;
+ let rustdocflags = self.bcx.rustdocflags_args(unit)?;
if !rustdocflags.is_empty() {
self.compilation
.rustdocflags
self.compilation.native_dirs.insert(dir.clone());
}
}
- self.compilation.host = self.build_config.host_triple().to_string();
- self.compilation.target = self.build_config.target_triple().to_string();
+ self.compilation.host = self.bcx.build_config.host_triple().to_string();
+ self.compilation.target = self.bcx.build_config.target_triple().to_string();
Ok(self.compilation)
}
export_dir: Option<PathBuf>,
units: &[Unit<'a>],
) -> CargoResult<()> {
- let dest = if self.build_config.release {
+ let dest = if self.bcx.build_config.release {
"release"
} else {
"debug"
};
- let host_layout = Layout::new(self.ws, None, dest)?;
- let target_layout = match self.build_config.requested_target.as_ref() {
- Some(target) => Some(Layout::new(self.ws, Some(target), dest)?),
+ let host_layout = Layout::new(self.bcx.ws, None, dest)?;
+ let target_layout = match self.bcx.build_config.requested_target.as_ref() {
+ Some(target) => Some(Layout::new(self.bcx.ws, Some(target), dest)?),
None => None,
};
- let deps = build_unit_dependencies(units, self)?;
+ let deps = build_unit_dependencies(units, self.bcx)?;
self.unit_dependencies = deps;
- let files =
- CompilationFiles::new(units, host_layout, target_layout, export_dir, self.ws, self);
+ let files = CompilationFiles::new(
+ units,
+ host_layout,
+ target_layout,
+ export_dir,
+ self.bcx.ws,
+ self,
+ );
self.files = Some(files);
Ok(())
}
/// - link_dst: Optional file to link/copy the result to (without metadata suffix)
/// - linkable: Whether it's possible to link against the file (e.g. it's a library)
pub fn outputs(&mut self, unit: &Unit<'a>) -> CargoResult<Arc<Vec<OutputFile>>> {
- self.files.as_ref().unwrap().outputs(unit, self)
+ self.files.as_ref().unwrap().outputs(unit, self.bcx)
}
/// For a package, return all targets which are registered as dependencies
self.unit_dependencies[unit].clone()
}
- pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult<String> {
- let deps = {
- let a = unit.pkg.package_id();
- let b = dep.pkg.package_id();
- if a == b {
- &[]
- } else {
- self.resolve.dependencies_listed(a, b)
- }
- };
-
- let crate_name = dep.target.crate_name();
- let mut names = deps.iter()
- .map(|d| d.rename().unwrap_or(&crate_name));
- let name = names.next().unwrap_or(&crate_name);
- for n in names {
- if n == name {
- continue
- }
- bail!("multiple dependencies listed for the same crate must \
- all have the same name, but the dependency on `{}` \
- is listed as having different names", dep.pkg.package_id());
- }
- Ok(name.to_string())
- }
-
- /// Whether a dependency should be compiled for the host or target platform,
- /// specified by `Kind`.
- fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
- // If this dependency is only available for certain platforms,
- // make sure we're only enabling it for that platform.
- let platform = match dep.platform() {
- Some(p) => p,
- None => return true,
- };
- let (name, info) = match kind {
- Kind::Host => (self.build_config.host_triple(), &self.host_info),
- Kind::Target => (self.build_config.target_triple(), &self.target_info),
- };
- platform.matches(name, info.cfg())
- }
-
- /// Gets a package for the given package id.
- pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
- self.packages.get(id)
- }
-
- /// Get the user-specified linker for a particular host or target
- pub fn linker(&self, kind: Kind) -> Option<&Path> {
- self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
- }
-
- /// Get the user-specified `ar` program for a particular host or target
- pub fn ar(&self, kind: Kind) -> Option<&Path> {
- self.target_config(kind).ar.as_ref().map(|s| s.as_ref())
- }
-
- /// Get the list of cfg printed out from the compiler for the specified kind
- pub fn cfg(&self, kind: Kind) -> &[Cfg] {
- let info = match kind {
- Kind::Host => &self.host_info,
- Kind::Target => &self.target_info,
- };
- info.cfg().unwrap_or(&[])
- }
-
- /// Get the target configuration for a particular host or target
- fn target_config(&self, kind: Kind) -> &TargetConfig {
- match kind {
- Kind::Host => &self.build_config.host,
- Kind::Target => &self.build_config.target,
- }
- }
-
- /// Number of jobs specified for this build
- pub fn jobs(&self) -> u32 {
- self.build_config.jobs
- }
-
pub fn incremental_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
// There's a number of ways to configure incremental compilation right
// now. In order of descending priority (first is highest priority) we
// incremental compilation or not. Primarily development profiles
// have it enabled by default while release profiles have it disabled
// by default.
- let global_cfg = self.config.get_bool("build.incremental")?.map(|c| c.val);
- let incremental = match (self.incremental_env, global_cfg, unit.profile.incremental) {
+ let global_cfg = self.bcx
+ .config
+ .get_bool("build.incremental")?
+ .map(|c| c.val);
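+ // `CARGO_INCREMENTAL` always wins; failing that, an explicit
+ // `build.incremental = false` in the configuration forces incremental
+ // compilation off, while anything else defers to the profile.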
+ let incremental = match (
+ self.bcx.incremental_env,
+ global_cfg,
+ unit.profile.incremental,
+ ) {
(Some(v), _, _) => v,
(None, Some(false), _) => false,
(None, _, other) => other,
let dir = self.files().layout(unit.kind).incremental().display();
Ok(vec!["-C".to_string(), format!("incremental={}", dir)])
}
-
- pub fn rustflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
- env_args(
- self.config,
- &self.build_config,
- self.info(&unit.kind).cfg(),
- unit.kind,
- "RUSTFLAGS",
- )
- }
-
- pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
- env_args(
- self.config,
- &self.build_config,
- self.info(&unit.kind).cfg(),
- unit.kind,
- "RUSTDOCFLAGS",
- )
- }
-
- pub fn show_warnings(&self, pkg: &PackageId) -> bool {
- pkg.source_id().is_path() || self.config.extra_verbose()
- }
-
- fn info(&self, kind: &Kind) -> &TargetInfo {
- match *kind {
- Kind::Host => &self.host_info,
- Kind::Target => &self.target_info,
- }
- }
-
- pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec<String>> {
- if let Some((ref args_unit, ref args)) = self.extra_compiler_args {
- if args_unit == unit {
- return Some(args);
- }
- }
- None
- }
}
/// Acquire extra flags to pass to the compiler from various locations.
//! (for example, with and without tests), so we actually build a dependency
//! graph of `Unit`s, which capture these properties.
-use super::{Context, Kind, Unit};
+use super::{BuildContext, Kind, Unit};
use core::dependency::Kind as DepKind;
use core::profiles::ProfileFor;
use core::{Package, Target};
pub fn build_unit_dependencies<'a, 'cfg>(
roots: &[Unit<'a>],
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
) -> CargoResult<HashMap<Unit<'a>, Vec<Unit<'a>>>> {
let mut deps = HashMap::new();
for unit in roots.iter() {
// cleared, and avoid building the lib thrice (once with `panic`, once
// without, once for --test). In particular, the lib included for
// doctests and examples is in `Build` mode here.
- let profile_for = if unit.mode.is_any_test() || cx.build_config.test {
+ let profile_for = if unit.mode.is_any_test() || bcx.build_config.test {
ProfileFor::TestDependency
} else {
ProfileFor::Any
};
- deps_of(unit, cx, &mut deps, profile_for)?;
+ deps_of(unit, bcx, &mut deps, profile_for)?;
}
Ok(deps)
fn deps_of<'a, 'b, 'cfg>(
unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
deps: &'b mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
profile_for: ProfileFor,
) -> CargoResult<&'b [Unit<'a>]> {
// requested unit's settings are the same as `Any`, `CustomBuild` can't
// affect anything else in the hierarchy.
if !deps.contains_key(unit) {
- let unit_deps = compute_deps(unit, cx, deps, profile_for)?;
+ let unit_deps = compute_deps(unit, bcx, deps, profile_for)?;
let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect();
deps.insert(*unit, to_insert);
for (unit, profile_for) in unit_deps {
- deps_of(&unit, cx, deps, profile_for)?;
+ deps_of(&unit, bcx, deps, profile_for)?;
}
}
Ok(deps[unit].as_ref())
/// is the profile type that should be used for dependencies of the unit.
fn compute_deps<'a, 'b, 'cfg>(
unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
deps: &'b mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
profile_for: ProfileFor,
) -> CargoResult<Vec<(Unit<'a>, ProfileFor)>> {
if unit.mode.is_run_custom_build() {
- return compute_deps_custom_build(unit, cx, deps);
+ return compute_deps_custom_build(unit, bcx, deps);
} else if unit.mode.is_doc() && !unit.mode.is_any_test() {
// Note: This does not include Doctest.
- return compute_deps_doc(unit, cx);
+ return compute_deps_doc(unit, bcx);
}
let id = unit.pkg.package_id();
- let deps = cx.resolve.deps(id);
+ let deps = bcx.resolve.deps(id);
let mut ret = deps.filter(|&(_id, deps)| {
assert!(deps.len() > 0);
deps.iter().any(|dep| {
// If this dependency is only available for certain platforms,
// make sure we're only enabling it for that platform.
- if !cx.dep_platform_activated(dep, unit.kind) {
+ if !bcx.dep_platform_activated(dep, unit.kind) {
return false;
}
// If the dependency is optional, then we're only activating it
// if the corresponding feature was activated
- if dep.is_optional() && !cx.resolve.features(id).contains(&*dep.name()) {
+ if dep.is_optional() && !bcx.resolve.features(id).contains(&*dep.name()) {
return false;
}
// actually used!
true
})
- }).filter_map(|(id, _)| match cx.get_package(id) {
+ }).filter_map(|(id, _)| match bcx.get_package(id) {
Ok(pkg) => pkg.targets().iter().find(|t| t.is_lib()).map(|t| {
let mode = check_or_build_mode(&unit.mode, t);
- let unit = new_unit(cx, pkg, t, profile_for, unit.kind.for_target(t), mode);
+ let unit = new_unit(bcx, pkg, t, profile_for, unit.kind.for_target(t), mode);
Ok((unit, profile_for))
}),
Err(e) => Some(Err(e)),
if unit.target.is_custom_build() {
return Ok(ret);
}
- ret.extend(dep_build_script(unit, cx));
+ ret.extend(dep_build_script(unit, bcx));
// If this target is a binary, test, example, etc, then it depends on
// the library of the same package. The call to `resolve.deps` above
if unit.target.is_lib() && unit.mode != CompileMode::Doctest {
return Ok(ret);
}
- ret.extend(maybe_lib(unit, cx, profile_for));
+ ret.extend(maybe_lib(unit, bcx, profile_for));
Ok(ret)
}
/// the returned set of units must all be run before `unit` is run.
fn compute_deps_custom_build<'a, 'cfg>(
unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
) -> CargoResult<Vec<(Unit<'a>, ProfileFor)>> {
// When not overridden, the dependencies to run a build script are:
kind: unit.kind,
mode: CompileMode::Build,
};
- let deps = deps_of(&tmp, cx, deps, ProfileFor::Any)?;
+ let deps = deps_of(&tmp, bcx, deps, ProfileFor::Any)?;
Ok(deps.iter()
.filter_map(|unit| {
if !unit.target.linkable() || unit.pkg.manifest().links().is_none() {
return None;
}
- dep_build_script(unit, cx)
+ dep_build_script(unit, bcx)
})
.chain(Some((
new_unit(
- cx,
+ bcx,
unit.pkg,
unit.target,
ProfileFor::CustomBuild,
/// Returns the dependencies necessary to document a package
fn compute_deps_doc<'a, 'cfg>(
unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
+ bcx: &BuildContext<'a, 'cfg>,
) -> CargoResult<Vec<(Unit<'a>, ProfileFor)>> {
- let deps = cx.resolve
+ let deps = bcx.resolve
.deps(unit.pkg.package_id())
.filter(|&(_id, deps)| {
deps.iter().any(|dep| match dep.kind() {
- DepKind::Normal => cx.dep_platform_activated(dep, unit.kind),
+ DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind),
_ => false,
})
})
- .map(|(id, _deps)| cx.get_package(id));
+ .map(|(id, _deps)| bcx.get_package(id));
// To document a library, we depend on dependencies actually being
// built. If we're documenting *all* libraries, then we also depend on
// However, for plugins/proc-macros, deps should be built like normal.
let mode = check_or_build_mode(&unit.mode, lib);
let lib_unit = new_unit(
- cx,
+ bcx,
dep,
lib,
ProfileFor::Any,
if let CompileMode::Doc { deps: true } = unit.mode {
// Document this lib as well.
let doc_unit = new_unit(
- cx,
+ bcx,
dep,
lib,
ProfileFor::Any,
}
// Be sure to build/run the build script for documented libraries as well.
- ret.extend(dep_build_script(unit, cx));
+ ret.extend(dep_build_script(unit, bcx));
// If we document a binary, we need the library available
if unit.target.is_bin() {
- ret.extend(maybe_lib(unit, cx, ProfileFor::Any));
+ ret.extend(maybe_lib(unit, bcx, ProfileFor::Any));
}
Ok(ret)
}
fn maybe_lib<'a>(
unit: &Unit<'a>,
- cx: &Context,
+ bcx: &BuildContext,
profile_for: ProfileFor,
) -> Option<(Unit<'a>, ProfileFor)> {
let mode = check_or_build_mode(&unit.mode, unit.target);
unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| {
- let unit = new_unit(cx, unit.pkg, t, profile_for, unit.kind.for_target(t), mode);
+ let unit = new_unit(bcx, unit.pkg, t, profile_for, unit.kind.for_target(t), mode);
(unit, profile_for)
})
}
/// script itself doesn't have any dependencies, so even in that case a unit
/// of work is still returned. `None` is only returned if the package has no
/// build script.
-fn dep_build_script<'a>(unit: &Unit<'a>, cx: &Context) -> Option<(Unit<'a>, ProfileFor)> {
+fn dep_build_script<'a>(unit: &Unit<'a>, bcx: &BuildContext) -> Option<(Unit<'a>, ProfileFor)> {
unit.pkg
.targets()
.iter()
Unit {
pkg: unit.pkg,
target: t,
- profile: cx.profiles.get_profile_run_custom_build(&unit.profile),
+ profile: bcx.profiles.get_profile_run_custom_build(&unit.profile),
kind: unit.kind,
mode: CompileMode::RunCustomBuild,
},
}
fn new_unit<'a>(
- cx: &Context,
+ bcx: &BuildContext,
pkg: &'a Package,
target: &'a Target,
profile_for: ProfileFor,
kind: Kind,
mode: CompileMode,
) -> Unit<'a> {
- let profile = cx.profiles.get_profile(
+ let profile = bcx.profiles.get_profile(
&pkg.name(),
- cx.ws.is_member(pkg),
+ bcx.ws.is_member(pkg),
profile_for,
mode,
- cx.build_config.release,
+ bcx.build_config.release,
);
Unit {
pkg,
fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> {
assert!(unit.mode.is_run_custom_build());
+ let bcx = &cx.bcx;
let dependencies = cx.dep_targets(unit);
let build_script_unit = dependencies
.iter()
let debug = unit.profile.debuginfo.unwrap_or(0) != 0;
cmd.env("OUT_DIR", &build_output)
.env("CARGO_MANIFEST_DIR", unit.pkg.root())
- .env("NUM_JOBS", &cx.jobs().to_string())
+ .env("NUM_JOBS", &bcx.jobs().to_string())
.env(
"TARGET",
&match unit.kind {
- Kind::Host => &cx.build_config.host_triple(),
- Kind::Target => cx.build_config.target_triple(),
+ Kind::Host => bcx.build_config.host_triple(),
+ Kind::Target => bcx.build_config.target_triple(),
},
)
.env("DEBUG", debug.to_string())
.env("OPT_LEVEL", &unit.profile.opt_level.to_string())
.env(
"PROFILE",
- if cx.build_config.release {
+ if bcx.build_config.release {
"release"
} else {
"debug"
},
)
- .env("HOST", &cx.build_config.host_triple())
- .env("RUSTC", &cx.build_config.rustc.path)
- .env("RUSTDOC", &*cx.config.rustdoc()?)
+ .env("HOST", &bcx.build_config.host_triple())
+ .env("RUSTC", &bcx.build_config.rustc.path)
+ .env("RUSTDOC", &*bcx.config.rustdoc()?)
.inherit_jobserver(&cx.jobserver);
- if let Some(ref linker) = cx.build_config.target.linker {
+ if let Some(ref linker) = bcx.build_config.target.linker {
cmd.env("RUSTC_LINKER", linker);
}
// Be sure to pass along all enabled features for this package; this is
// the last piece of statically known information that we have.
- for feat in cx.resolve.features(unit.pkg.package_id()).iter() {
+ for feat in bcx.resolve.features(unit.pkg.package_id()).iter() {
cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
}
let mut cfg_map = HashMap::new();
- for cfg in cx.cfg(unit.kind) {
+ for cfg in bcx.cfg(unit.kind) {
match *cfg {
Cfg::Name(ref n) => {
cfg_map.insert(n.clone(), None);
);
let build_scripts = super::load_build_deps(cx, unit);
let kind = unit.kind;
- let json_messages = cx.build_config.json_messages;
+ let json_messages = bcx.build_config.json_messages;
// Check to see if the build script has already run, and if it has, keep
// track of whether it has told us about some explicit dependencies
use util::paths;
use util::{internal, profile, Dirty, Fresh, Freshness};
-use super::context::{Context, FileFlavor, Unit};
+use super::context::{BuildContext, Context, FileFlavor, Unit};
use super::custom_build::BuildDeps;
use super::job::Work;
unit.pkg.package_id(),
unit.target.name()
));
+ let bcx = cx.bcx;
let new = cx.files().fingerprint_dir(unit);
let loc = new.join(&filename(cx, unit));
// changed then an error is issued.
if compare.is_err() {
let source_id = unit.pkg.package_id().source_id();
- let sources = cx.packages.sources();
+ let sources = bcx.packages.sources();
let source = sources
.get(source_id)
.ok_or_else(|| internal("missing package source"))?;
}
}
- let allow_failure = cx.extra_args_for(unit).is_some();
+ let allow_failure = bcx.extra_args_for(unit).is_some();
let target_root = cx.files().target_root().to_path_buf();
let write_fingerprint = Work::new(move |_| {
match fingerprint.update_local(&target_root) {
cx: &mut Context<'a, 'cfg>,
unit: &Unit<'a>,
) -> CargoResult<Arc<Fingerprint>> {
+ let bcx = cx.bcx;
if let Some(s) = cx.fingerprints.get(unit) {
return Ok(Arc::clone(s));
}
.filter(|u| !u.target.is_custom_build() && !u.target.is_bin())
.map(|dep| {
calculate(cx, dep).and_then(|fingerprint| {
- let name = cx.extern_crate_name(unit, dep)?;
+ let name = bcx.extern_crate_name(unit, dep)?;
Ok((dep.pkg.package_id().to_string(), name, fingerprint))
})
})
let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?;
LocalFingerprint::mtime(cx.files().target_root(), mtime, &dep_info)
} else {
- let fingerprint = pkg_fingerprint(cx, unit.pkg)?;
+ let fingerprint = pkg_fingerprint(bcx, unit.pkg)?;
LocalFingerprint::Precalculated(fingerprint)
};
let mut deps = deps;
deps.sort_by(|&(ref a, _, _), &(ref b, _, _)| a.cmp(b));
let extra_flags = if unit.mode.is_doc() {
- cx.rustdocflags_args(unit)?
+ bcx.rustdocflags_args(unit)?
} else {
- cx.rustflags_args(unit)?
+ bcx.rustflags_args(unit)?
};
let profile_hash = util::hash_u64(&(
&unit.profile,
unit.mode,
- cx.extra_args_for(unit),
+ bcx.extra_args_for(unit),
cx.incremental_args(unit)?,
));
let fingerprint = Arc::new(Fingerprint {
- rustc: util::hash_u64(&cx.build_config.rustc.verbose_version),
+ rustc: util::hash_u64(&bcx.build_config.rustc.verbose_version),
target: util::hash_u64(&unit.target),
profile: profile_hash,
// Note that .0 is hashed here, not .1, which is the cwd. That doesn't
// actually affect the output artifact, so there's no need to hash it.
- path: util::hash_u64(&super::path_args(cx, unit).0),
- features: format!("{:?}", cx.resolve.features_sorted(unit.pkg.package_id())),
+ path: util::hash_u64(&super::path_args(bcx, unit).0),
+ features: format!("{:?}", bcx.resolve.features_sorted(unit.pkg.package_id())),
deps,
local: vec![local],
memoized_hash: Mutex::new(None),
let output = deps.build_script_output.clone();
if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
debug!("old local fingerprints deps");
- let s = pkg_fingerprint(cx, unit.pkg)?;
+ let s = pkg_fingerprint(&cx.bcx, unit.pkg)?;
return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output)));
}
}
}
-fn pkg_fingerprint(cx: &Context, pkg: &Package) -> CargoResult<String> {
+fn pkg_fingerprint(bcx: &BuildContext, pkg: &Package) -> CargoResult<String> {
let source_id = pkg.package_id().source_id();
- let sources = cx.packages.sources();
+ let sources = bcx.packages.sources();
let source = sources
.get(source_id)
use util::{Config, DependencyQueue, Dirty, Fresh, Freshness};
use super::job::Job;
-use super::{Context, Kind, Unit};
+use super::{BuildContext, Context, Kind, Unit};
/// A management structure of the entire dependency graph to compile.
///
}
impl<'a> JobQueue<'a> {
- pub fn new<'cfg>(cx: &Context<'a, 'cfg>) -> JobQueue<'a> {
+ pub fn new<'cfg>(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a> {
let (tx, rx) = channel();
JobQueue {
queue: DependencyQueue::new(),
compiled: HashSet::new(),
documented: HashSet::new(),
counts: HashMap::new(),
- is_release: cx.build_config.release,
+ is_release: bcx.build_config.release,
}
}
// we're able to perform some parallel work.
while error.is_none() && self.active < tokens.len() + 1 && !queue.is_empty() {
let (key, job, fresh) = queue.remove(0);
- self.run(key, fresh, job, cx.config, scope)?;
+ self.run(key, fresh, job, cx.bcx.config, scope)?;
}
// If after all that we're not actually running anything then we're
match self.rx.recv().unwrap() {
Message::Run(cmd) => {
- cx.config.shell().verbose(|c| c.status("Running", &cmd))?;
+ cx.bcx
+ .config
+ .shell()
+ .verbose(|c| c.status("Running", &cmd))?;
}
Message::Stdout(out) => {
- if cx.config.extra_verbose() {
+ if cx.bcx.config.extra_verbose() {
println!("{}", out);
}
}
Message::Stderr(err) => {
- if cx.config.extra_verbose() {
- writeln!(cx.config.shell().err(), "{}", err)?;
+ if cx.bcx.config.extra_verbose() {
+ writeln!(cx.bcx.config.shell().err(), "{}", err)?;
}
}
Message::Finish(key, result) => {
if self.active > 0 {
error = Some(format_err!("build failed"));
- handle_error(e, &mut *cx.config.shell());
- cx.config.shell().warn(
+ handle_error(e, &mut *cx.bcx.config.shell());
+ cx.bcx.config.shell().warn(
"build failed, waiting for other \
jobs to finish...",
)?;
// list of Units built, and maybe display a list of the different
// profiles used. However, to keep it simple and compatible with old
// behavior, we just display what the base profile is.
- let profile = cx.profiles.base_profile(self.is_release);
+ let profile = cx.bcx.profiles.base_profile(self.is_release);
let mut opt_type = String::from(if profile.opt_level.as_str() == "0" {
"unoptimized"
} else {
if profile.debuginfo.is_some() {
opt_type += " + debuginfo";
}
- let duration = cx.config.creation_time().elapsed();
+ let duration = cx.bcx.config.creation_time().elapsed();
let time_elapsed = format!(
"{}.{:02} secs",
duration.as_secs(),
"{} [{}] target(s) in {}",
build_type, opt_type, time_elapsed
);
- cx.config.shell().status("Finished", message)?;
+ cx.bcx.config.shell().status("Finished", message)?;
Ok(())
} else if let Some(e) = error {
Err(e)
fn emit_warnings(&self, msg: Option<&str>, key: &Key<'a>, cx: &mut Context) -> CargoResult<()> {
let output = cx.build_state.outputs.lock().unwrap();
+ let bcx = cx.bcx;
if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) {
if let Some(msg) = msg {
if !output.warnings.is_empty() {
- writeln!(cx.config.shell().err(), "{}\n", msg)?;
+ writeln!(bcx.config.shell().err(), "{}\n", msg)?;
}
}
for warning in output.warnings.iter() {
- cx.config.shell().warn(warning)?;
+ bcx.config.shell().warn(warning)?;
}
if !output.warnings.is_empty() && msg.is_some() {
// Output an empty line.
- writeln!(cx.config.shell().err(), "")?;
+ writeln!(bcx.config.shell().err(), "")?;
}
}
}
fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> {
- if key.mode.is_run_custom_build() && cx.show_warnings(key.pkg) {
+ if key.mode.is_run_custom_build() && cx.bcx.show_warnings(key.pkg) {
self.emit_warnings(None, &key, cx)?;
}
fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) -> CargoResult<Vec<Key<'a>>> {
let unit = Unit {
- pkg: cx.get_package(self.pkg)?,
+ pkg: cx.bcx.get_package(self.pkg)?,
target: self.target,
profile: self.profile,
kind: self.kind,
use self::output_depinfo::output_depinfo;
pub use self::compilation::Compilation;
-pub use self::context::{Context, FileFlavor, TargetInfo, Unit};
+pub use self::context::{BuildContext, Context, FileFlavor, TargetInfo, Unit};
pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts};
pub use self::layout::is_bad_artifact_name;
unit: &Unit<'a>,
exec: &Arc<Executor>,
) -> CargoResult<()> {
+ let bcx = cx.bcx;
if !cx.compiled.insert(*unit) {
return Ok(());
}
// we've got everything constructed.
let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name()));
fingerprint::prepare_init(cx, unit)?;
- cx.links.validate(cx.resolve, unit)?;
+ cx.links.validate(bcx.resolve, unit)?;
let (dirty, fresh, freshness) = if unit.mode.is_run_custom_build() {
custom_build::prepare(cx, unit)?
}
fn rustc<'a, 'cfg>(
cx: &mut Context<'a, 'cfg>,
unit: &Unit<'a>,
exec: &Arc<Executor>,
) -> CargoResult<Work> {
// If this is an upstream dep we don't want warnings from, turn off all
// lints.
- if !cx.show_warnings(unit.pkg.package_id()) {
+ if !cx.bcx.show_warnings(unit.pkg.package_id()) {
rustc.arg("--cap-lints").arg("allow");
// If this is an upstream dep but we *do* want warnings, make sure that they
} else {
root.join(&cx.files().file_stem(unit))
}.with_extension("d");
let dep_info_loc = fingerprint::dep_info_loc(cx, unit);
- rustc.args(&cx.rustflags_args(unit)?);
- let json_messages = cx.build_config.json_messages;
+ rustc.args(&cx.bcx.rustflags_args(unit)?);
+ let json_messages = cx.bcx.build_config.json_messages;
let package_id = unit.pkg.package_id().clone();
let target = unit.target.clone();
let pkg_root = unit.pkg.root().to_path_buf();
let cwd = rustc
.get_cwd()
- .unwrap_or_else(|| cx.config.cwd())
+ .unwrap_or_else(|| cx.bcx.config.cwd())
.to_path_buf();
return Ok(Work::new(move |state| {
unit: &Unit<'a>,
fresh: bool,
) -> CargoResult<Work> {
+ let bcx = cx.bcx;
let outputs = cx.outputs(unit)?;
let export_dir = cx.files().export_dir(unit);
let package_id = unit.pkg.package_id().clone();
let target = unit.target.clone();
let profile = unit.profile;
let unit_mode = unit.mode;
- let features = cx.resolve
+ let features = bcx.resolve
.features_sorted(&package_id)
.into_iter()
.map(|s| s.to_owned())
.collect();
- let json_messages = cx.build_config.json_messages;
+ let json_messages = bcx.build_config.json_messages;
Ok(Work::new(move |_| {
// If we're a "root crate", e.g. the target of this compilation, then we
}
fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Work> {
+ let bcx = cx.bcx;
let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg)?;
rustdoc.inherit_jobserver(&cx.jobserver);
rustdoc.arg("--crate-name").arg(&unit.target.crate_name());
- add_path_args(cx, unit, &mut rustdoc);
+ add_path_args(bcx, unit, &mut rustdoc);
if unit.kind != Kind::Host {
- if let Some(ref target) = cx.build_config.requested_target {
+ if let Some(ref target) = bcx.build_config.requested_target {
rustdoc.arg("--target").arg(target);
}
}
rustdoc.arg("-o").arg(doc_dir);
- for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
+ for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) {
rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
}
rustdoc.arg(format!("--edition={}", &manifest.edition()));
}
- if let Some(ref args) = cx.extra_args_for(unit) {
+ if let Some(ref args) = bcx.extra_args_for(unit) {
rustdoc.args(args);
}
build_deps_args(&mut rustdoc, cx, unit)?;
- rustdoc.args(&cx.rustdocflags_args(unit)?);
+ rustdoc.args(&bcx.rustdocflags_args(unit)?);
let name = unit.pkg.name().to_string();
let build_state = cx.build_state.clone();
//
// The first returned value here is the argument to pass to rustc, and the
// second is the cwd that rustc should operate in.
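// For example, for a workspace rooted at `/ws` with a source file at
// `/ws/foo/src/lib.rs`, this yields (`foo/src/lib.rs`, `/ws`), keeping
// compiler-reported paths relative to the workspace root.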
-fn path_args(cx: &Context, unit: &Unit) -> (PathBuf, PathBuf) {
- let ws_root = cx.ws.root();
+fn path_args(bcx: &BuildContext, unit: &Unit) -> (PathBuf, PathBuf) {
+ let ws_root = bcx.ws.root();
let src = unit.target.src_path();
assert!(src.is_absolute());
match src.strip_prefix(ws_root) {
}
}
-fn add_path_args(cx: &Context, unit: &Unit, cmd: &mut ProcessBuilder) {
- let (arg, cwd) = path_args(cx, unit);
+fn add_path_args(bcx: &BuildContext, unit: &Unit, cmd: &mut ProcessBuilder) {
+ let (arg, cwd) = path_args(bcx, unit);
cmd.arg(arg);
cmd.cwd(cwd);
}
) -> CargoResult<()> {
assert!(!unit.mode.is_run_custom_build());
+ let bcx = cx.bcx;
let Profile {
ref opt_level,
ref lto,
cmd.arg("--crate-name").arg(&unit.target.crate_name());
- add_path_args(cx, unit, cmd);
+ add_path_args(bcx, unit, cmd);
- match cx.config.shell().color_choice() {
+ match bcx.config.shell().color_choice() {
ColorChoice::Always => {
cmd.arg("--color").arg("always");
}
ColorChoice::CargoAuto => {}
}
- if cx.build_config.json_messages {
+ if bcx.build_config.json_messages {
cmd.arg("--error-format").arg("json");
}
}
let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build())
- || (crate_types.contains(&"dylib") && cx.ws.members().any(|p| p != unit.pkg));
+ || (crate_types.contains(&"dylib") && bcx.ws.members().any(|p| p != unit.pkg));
if prefer_dynamic {
cmd.arg("-C").arg("prefer-dynamic");
}
cmd.arg("-C").arg(format!("debuginfo={}", debuginfo));
}
- if let Some(ref args) = cx.extra_args_for(unit) {
+ if let Some(ref args) = bcx.extra_args_for(unit) {
cmd.args(args);
}
// We ideally want deterministic invocations of rustc to ensure that
// rustc-caching strategies like sccache are able to cache more, so sort the
// feature list here.
- for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
+ for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) {
cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
}
cmd,
"--target",
"",
- cx.build_config
+ bcx.build_config
.requested_target
.as_ref()
.map(|s| s.as_ref()),
);
}
- opt(cmd, "-C", "ar=", cx.ar(unit.kind).map(|s| s.as_ref()));
+ opt(cmd, "-C", "ar=", bcx.ar(unit.kind).map(|s| s.as_ref()));
opt(
cmd,
"-C",
"linker=",
- cx.linker(unit.kind).map(|s| s.as_ref()),
+ bcx.linker(unit.kind).map(|s| s.as_ref()),
);
cmd.args(&cx.incremental_args(unit)?);
cx: &mut Context<'a, 'cfg>,
unit: &Unit<'a>,
) -> CargoResult<()> {
+ let bcx = cx.bcx;
cmd.arg("-L").arg(&{
let mut deps = OsString::from("dependency=");
deps.push(cx.files().deps_dir(unit));
.iter()
.find(|u| !u.mode.is_doc() && u.target.is_lib())
{
- cx.config.shell().warn(format!(
+ bcx.config.shell().warn(format!(
"The package `{}` \
provides no linkable target. The compiler might raise an error while compiling \
`{}`. Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \
current: &Unit<'a>,
dep: &Unit<'a>,
) -> CargoResult<()> {
+ let bcx = cx.bcx;
for output in cx.outputs(dep)?.iter() {
if output.flavor != FileFlavor::Linkable {
continue;
}
let mut v = OsString::new();
- let name = cx.extern_crate_name(current, dep)?;
+ let name = bcx.extern_crate_name(current, dep)?;
v.push(name);
v.push("=");
v.push(cx.files().out_dir(dep));
Ok(())
}
-pub fn output_depinfo<'a, 'b>(context: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
+pub fn output_depinfo<'a, 'b>(cx: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
+ let bcx = cx.bcx;
let mut deps = BTreeSet::new();
let mut visited = HashSet::new();
- let success = add_deps_for_unit(&mut deps, context, unit, &mut visited).is_ok();
+ let success = add_deps_for_unit(&mut deps, cx, unit, &mut visited).is_ok();
let basedir_string;
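+ // `build.dep-info-basedir` in the Cargo configuration lets users render
+ // the paths in emitted dep-info files relative to a directory of their
+ // choosing.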
- let basedir = match context.config.get_path("build.dep-info-basedir")? {
+ let basedir = match bcx.config.get_path("build.dep-info-basedir")? {
Some(value) => {
basedir_string = value
.val
.map(|f| render_filename(f, basedir))
.collect::<CargoResult<Vec<_>>>()?;
- for output in context.outputs(unit)?.iter() {
+ for output in cx.outputs(unit)?.iter() {
if let Some(ref link_dst) = output.hardlink {
let output_path = link_dst.with_extension("d");
if success {
use std::fs;
use std::path::Path;
-use core::compiler::{BuildConfig, Context, Kind, Unit};
+use core::compiler::{BuildConfig, BuildContext, Context, Kind, Unit};
use core::profiles::ProfileFor;
use core::Workspace;
use ops::{self, CompileMode};
let mut build_config = BuildConfig::new(config, Some(1), &opts.target, None)?;
build_config.release = opts.release;
- let mut cx = Context::new(
+ let bcx = BuildContext::new(
ws,
&resolve,
&packages,
config,
&build_config,
profiles,
None,
)?;
+ let mut cx = Context::new(config, &bcx)?;
cx.prepare_units(None, &units)?;
for unit in units.iter() {
use std::path::{Path, PathBuf};
use std::sync::Arc;
-use core::compiler::{BuildConfig, Compilation, Context, DefaultExecutor, Executor};
+use core::compiler::{BuildConfig, BuildContext, Compilation, Context, DefaultExecutor, Executor};
use core::compiler::{Kind, Unit};
use core::profiles::{ProfileFor, Profiles};
use core::resolver::{Method, Resolve};
let mut ret = {
let _p = profile::start("compiling");
- let mut cx = Context::new(
+ let bcx = BuildContext::new(
ws,
&resolve_with_overrides,
&packages,
config,
&build_config,
profiles,
extra_compiler_args,
)?;
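+ // The mutable `Context` borrows the read-only `BuildContext` for the
+ // duration of this one compilation.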
+ let mut cx = Context::new(config, &bcx)?;
cx.compile(&units, export_dir.clone(), &exec)?
};