--- /dev/null
+use std::collections::{BTreeSet, HashMap, HashSet};
+use std::ffi::OsStr;
+use std::path::PathBuf;
+
+use semver::Version;
+use lazycell::LazyCell;
+
+use core::{Package, PackageId, Target, TargetKind};
+use util::{self, join_paths, process, CargoResult, Config, ProcessBuilder};
+
+/// A structure holding the result of a compilation.
+pub struct Compilation<'cfg> {
+ /// A mapping from a package to the list of libraries that need to be
+ /// linked when working with that package.
+ pub libraries: HashMap<PackageId, HashSet<(Target, PathBuf)>>,
+
+ /// An array of all tests created during this compilation.
+ pub tests: Vec<(Package, TargetKind, String, PathBuf)>,
+
+ /// An array of all binaries created.
+ pub binaries: Vec<PathBuf>,
+
+ /// All directories for the output of native build commands.
+ ///
+ /// This is currently used to populate the entries added to
+ /// `LD_LIBRARY_PATH` (or the platform's equivalent dynamic-library
+ /// search path) as appropriate.
+ ///
+ /// The order should be deterministic.
+ // TODO: deprecated, remove
+ pub native_dirs: BTreeSet<PathBuf>,
+
+ /// Root output directory (for the local package's artifacts)
+ pub root_output: PathBuf,
+
+ /// Output directory for Rust dependencies.
+ /// May be for the host or for a specific target.
+ pub deps_output: PathBuf,
+
+ /// Output directory for the Rust host dependencies.
+ pub host_deps_output: PathBuf,
+
+ /// The path to rustc's own libstd
+ pub host_dylib_path: Option<PathBuf>,
+
+ /// The path to libstd for the target
+ pub target_dylib_path: Option<PathBuf>,
+
+ /// Extra environment variables that were passed to compilations and should
+ /// be passed to future invocations of programs.
+ pub extra_env: HashMap<PackageId, Vec<(String, String)>>,
+
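+ /// Packages that should have their documentation tested (via rustdoc).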
+ pub to_doc_test: Vec<Package>,
+
+ /// Features per package enabled during this compilation.
+ pub cfgs: HashMap<PackageId, HashSet<String>>,
+
+ /// Flags to pass to rustdoc when invoked from cargo test, per package.
+ pub rustdocflags: HashMap<PackageId, Vec<String>>,
+
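+ /// The target triple of this compilation (the host triple when not cross
+ /// compiling).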
+ pub target: String,
+
+ config: &'cfg Config,
+
+ target_runner: LazyCell<Option<(PathBuf, Vec<String>)>>,
+}
+
+impl<'cfg> Compilation<'cfg> {
+ pub fn new(config: &'cfg Config) -> Compilation<'cfg> {
+ Compilation {
+ libraries: HashMap::new(),
+ native_dirs: BTreeSet::new(), // TODO: deprecated, remove
+ root_output: PathBuf::from("/"),
+ deps_output: PathBuf::from("/"),
+ host_deps_output: PathBuf::from("/"),
+ host_dylib_path: None,
+ target_dylib_path: None,
+ tests: Vec::new(),
+ binaries: Vec::new(),
+ extra_env: HashMap::new(),
+ to_doc_test: Vec::new(),
+ cfgs: HashMap::new(),
+ rustdocflags: HashMap::new(),
+ config,
+ target: String::new(),
+ target_runner: LazyCell::new(),
+ }
+ }
+
+ /// See `fill_env`.
+ pub fn rustc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
+ self.fill_env(self.config.rustc()?.process(), pkg, true)
+ }
+
+ /// See `fill_env`.
+ pub fn rustdoc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
+ self.fill_env(process(&*self.config.rustdoc()?), pkg, false)
+ }
+
+ /// See `fill_env`.
+ pub fn host_process<T: AsRef<OsStr>>(
+ &self,
+ cmd: T,
+ pkg: &Package,
+ ) -> CargoResult<ProcessBuilder> {
+ self.fill_env(process(cmd), pkg, true)
+ }
+
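+ // The runner comes from a `target.<triple>.runner` key in
+ // `.cargo/config`, e.g. (triple and runner illustrative):
+ //
+ // [target.armv7-unknown-linux-gnueabihf]
+ // runner = "qemu-arm"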
+ fn target_runner(&self) -> CargoResult<&Option<(PathBuf, Vec<String>)>> {
+ self.target_runner.try_borrow_with(|| {
+ let key = format!("target.{}.runner", self.target);
+ Ok(self.config.get_path_and_args(&key)?.map(|v| v.val))
+ })
+ }
+
+ /// See `fill_env`.
+ pub fn target_process<T: AsRef<OsStr>>(
+ &self,
+ cmd: T,
+ pkg: &Package,
+ ) -> CargoResult<ProcessBuilder> {
+ let builder = if let Some((ref runner, ref args)) = *self.target_runner()? {
+ let mut builder = process(runner);
+ builder.args(args);
+ builder.arg(cmd);
+ builder
+ } else {
+ process(cmd)
+ };
+ self.fill_env(builder, pkg, false)
+ }
+
+ /// Prepares a new process with an appropriate environment to run against
+ /// the artifacts produced by the build process.
+ ///
+ /// The package argument is used to configure environment variables as
+ /// well as the working directory of the child process.
+ fn fill_env(
+ &self,
+ mut cmd: ProcessBuilder,
+ pkg: &Package,
+ is_host: bool,
+ ) -> CargoResult<ProcessBuilder> {
+ let mut search_path = if is_host {
+ let mut search_path = vec![self.host_deps_output.clone()];
+ search_path.extend(self.host_dylib_path.clone());
+ search_path
+ } else {
+ let mut search_path =
+ super::filter_dynamic_search_path(self.native_dirs.iter(), &self.root_output);
+ search_path.push(self.root_output.clone());
+ search_path.push(self.deps_output.clone());
+ search_path.extend(self.target_dylib_path.clone());
+ search_path
+ };
+
+ search_path.extend(util::dylib_path().into_iter());
+ let search_path = join_paths(&search_path, util::dylib_path_envvar())?;
+
+ cmd.env(util::dylib_path_envvar(), &search_path);
+ if let Some(env) = self.extra_env.get(pkg.package_id()) {
+ for &(ref k, ref v) in env {
+ cmd.env(k, v);
+ }
+ }
+
+ let metadata = pkg.manifest().metadata();
+
+ let cargo_exe = self.config.cargo_exe()?;
+ cmd.env(::CARGO_ENV, cargo_exe);
+
+ // When adding new environment variables that depend on crate properties
+ // which might require a rebuild upon change, consider adding the
+ // corresponding properties to the metadata hash computed in
+ // `compilation_files::compute_metadata()`.
+ cmd.env("CARGO_MANIFEST_DIR", pkg.root())
+ .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string())
+ .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string())
+ .env("CARGO_PKG_VERSION_PATCH", &pkg.version().patch.to_string())
+ .env(
+ "CARGO_PKG_VERSION_PRE",
+ &pre_version_component(pkg.version()),
+ )
+ .env("CARGO_PKG_VERSION", &pkg.version().to_string())
+ .env("CARGO_PKG_NAME", &*pkg.name())
+ .env(
+ "CARGO_PKG_DESCRIPTION",
+ metadata.description.as_ref().unwrap_or(&String::new()),
+ )
+ .env(
+ "CARGO_PKG_HOMEPAGE",
+ metadata.homepage.as_ref().unwrap_or(&String::new()),
+ )
+ .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":"))
+ .cwd(pkg.root());
+ Ok(cmd)
+ }
+}
+
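+/// Renders the pre-release component of a version, joining the identifiers
+/// with `.`; e.g. `1.0.0-alpha.1` yields `"alpha.1"`, and a stable version
+/// yields `""`.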
+fn pre_version_component(v: &Version) -> String {
+ if v.pre.is_empty() {
+ return String::new();
+ }
+
+ let mut ret = String::new();
+
+ for (i, x) in v.pre.iter().enumerate() {
+ if i != 0 {
+ ret.push('.')
+ };
+ ret.push_str(&x.to_string());
+ }
+
+ ret
+}
--- /dev/null
+use std::collections::HashMap;
+use std::env;
+use std::fmt;
+use std::hash::{Hash, Hasher, SipHasher};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use lazycell::LazyCell;
+
+use core::{TargetKind, Workspace};
+use super::{Context, FileFlavor, Kind, Layout, Unit};
+use util::{self, CargoResult};
+
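+/// A hash of a `Unit`'s inputs, rendered as 16 hex digits and embedded in
+/// output filenames (e.g. `foo-<metadata>`) so that different variants of
+/// the same crate don't collide on disk.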
+#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Metadata(u64);
+
+impl fmt::Display for Metadata {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{:016x}", self.0)
+ }
+}
+
+pub struct CompilationFiles<'a, 'cfg: 'a> {
+ /// The target directory layout for the host (and target if it is the same as host)
+ pub(super) host: Layout,
+ /// The target directory layout for the target (if different from the host)
+ pub(super) target: Option<Layout>,
+ export_dir: Option<(PathBuf, Vec<Unit<'a>>)>,
+ ws: &'a Workspace<'cfg>,
+ metas: HashMap<Unit<'a>, Option<Metadata>>,
+ /// For each Unit, a list of all files produced.
+ outputs: HashMap<Unit<'a>, LazyCell<Arc<Vec<OutputFile>>>>,
+}
+
+#[derive(Debug)]
+pub struct OutputFile {
+ /// File name that will be produced by the build process (in `deps`).
+ pub path: PathBuf,
+ /// If it should be linked into `target`, and what it should be called
+ /// (e.g. without metadata).
+ pub hardlink: Option<PathBuf>,
+ /// Type of the file (library / debug symbol / else).
+ pub flavor: FileFlavor,
+}
+
+impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> {
+ pub(super) fn new(
+ roots: &[Unit<'a>],
+ host: Layout,
+ target: Option<Layout>,
+ export_dir: Option<PathBuf>,
+ ws: &'a Workspace<'cfg>,
+ cx: &Context<'a, 'cfg>,
+ ) -> CompilationFiles<'a, 'cfg> {
+ let mut metas = HashMap::new();
+ for unit in roots {
+ metadata_of(unit, cx, &mut metas);
+ }
+ let outputs = metas
+ .keys()
+ .cloned()
+ .map(|unit| (unit, LazyCell::new()))
+ .collect();
+ CompilationFiles {
+ ws,
+ host,
+ target,
+ export_dir: export_dir.map(|dir| (dir, roots.to_vec())),
+ metas,
+ outputs,
+ }
+ }
+
+ /// Returns the appropriate directory layout for the given compilation
+ /// kind (host or target).
+ pub fn layout(&self, kind: Kind) -> &Layout {
+ match kind {
+ Kind::Host => &self.host,
+ Kind::Target => self.target.as_ref().unwrap_or(&self.host),
+ }
+ }
+
+ /// Get the metadata for a target in a specific profile.
+ /// We build to the path "{filename}-{target_metadata}", then use a
+ /// linking step to link/copy to a predictable filename
+ /// like `target/debug/libfoo.{a,so,rlib}`.
+ pub fn metadata(&self, unit: &Unit<'a>) -> Option<Metadata> {
+ self.metas[unit].clone()
+ }
+
+ /// Get the short hash based only on the `PackageId`.
+ /// Used for the metadata when `metadata` returns `None`.
+ pub fn target_short_hash(&self, unit: &Unit) -> String {
+ let hashable = unit.pkg.package_id().stable_hash(self.ws.root());
+ util::short_hash(&hashable)
+ }
+
+ /// Returns the appropriate output directory for the specified package and
+ /// target.
+ pub fn out_dir(&self, unit: &Unit<'a>) -> PathBuf {
+ if unit.profile.doc {
+ self.layout(unit.kind).root().parent().unwrap().join("doc")
+ } else if unit.target.is_custom_build() {
+ self.build_script_dir(unit)
+ } else if unit.target.is_example() {
+ self.layout(unit.kind).examples().to_path_buf()
+ } else {
+ self.deps_dir(unit).to_path_buf()
+ }
+ }
+
+ pub fn export_dir(&self, unit: &Unit<'a>) -> Option<PathBuf> {
+ let &(ref dir, ref roots) = self.export_dir.as_ref()?;
+ if roots.contains(unit) {
+ Some(dir.clone())
+ } else {
+ None
+ }
+ }
+
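+ /// Returns the directory name (`<name>-<hash>`) for a package's
+ /// intermediate artifacts, using the unit's metadata when available and a
+ /// short `PackageId` hash otherwise.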
+ pub fn pkg_dir(&self, unit: &Unit<'a>) -> String {
+ let name = unit.pkg.package_id().name();
+ match self.metas[unit] {
+ Some(ref meta) => format!("{}-{}", name, meta),
+ None => format!("{}-{}", name, self.target_short_hash(unit)),
+ }
+ }
+
+ /// Return the root of the build output tree
+ pub fn target_root(&self) -> &Path {
+ self.host.dest()
+ }
+
+ pub fn host_deps(&self) -> &Path {
+ self.host.deps()
+ }
+
+ /// Returns the directories where Rust crate dependencies are found for the
+ /// specified unit.
+ pub fn deps_dir(&self, unit: &Unit) -> &Path {
+ self.layout(unit.kind).deps()
+ }
+
+ pub fn fingerprint_dir(&self, unit: &Unit<'a>) -> PathBuf {
+ let dir = self.pkg_dir(unit);
+ self.layout(unit.kind).fingerprint().join(dir)
+ }
+
+ /// Returns the directory where the given build script is compiled to.
+ pub fn build_script_dir(&self, unit: &Unit<'a>) -> PathBuf {
+ assert!(unit.target.is_custom_build());
+ assert!(!unit.profile.run_custom_build);
+ let dir = self.pkg_dir(unit);
+ self.layout(Kind::Host).build().join(dir)
+ }
+
+ /// Returns the `out` directory where the given build script writes its
+ /// output when run.
+ pub fn build_script_out_dir(&self, unit: &Unit<'a>) -> PathBuf {
+ assert!(unit.target.is_custom_build());
+ assert!(unit.profile.run_custom_build);
+ let dir = self.pkg_dir(unit);
+ self.layout(unit.kind).build().join(dir).join("out")
+ }
+
+ /// Returns the file stem for a given target/profile combo (with metadata)
+ pub fn file_stem(&self, unit: &Unit<'a>) -> String {
+ match self.metas[unit] {
+ Some(ref metadata) => format!("{}-{}", unit.target.crate_name(), metadata),
+ None => self.bin_stem(unit),
+ }
+ }
+
+ pub(super) fn outputs(
+ &self,
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ ) -> CargoResult<Arc<Vec<OutputFile>>> {
+ self.outputs[unit]
+ .try_borrow_with(|| self.calc_outputs(unit, cx))
+ .map(Arc::clone)
+ }
+
+ /// Returns the bin stem for a given target (without metadata)
+ fn bin_stem(&self, unit: &Unit) -> String {
+ if unit.target.allows_underscores() {
+ unit.target.name().to_string()
+ } else {
+ unit.target.crate_name()
+ }
+ }
+
+ /// Returns a tuple with the directory and name of the hard link we expect
+ /// our target to be copied to. E.g., file_stem may be out_dir/deps/foo-abcdef
+ /// and link_stem would be out_dir/foo.
+ /// This function returns the stem in two parts so the caller can add a
+ /// prefix/suffix to the filename separately.
+ ///
+ /// Returns an `Option` because in some cases we don't want to link
+ /// (e.g. a dependent lib).
+ fn link_stem(&self, unit: &Unit<'a>) -> Option<(PathBuf, String)> {
+ let out_dir = self.out_dir(unit);
+ let bin_stem = self.bin_stem(unit);
+ let file_stem = self.file_stem(unit);
+
+ // We currently only lift files up from the `deps` directory. If
+ // it was compiled into something like `example/` or `doc/` then
+ // we don't want to link it up.
+ if out_dir.ends_with("deps") {
+ // Don't lift up library dependencies
+ if self.ws.members().find(|&p| p == unit.pkg).is_none() && !unit.target.is_bin() {
+ None
+ } else {
+ Some((
+ out_dir.parent().unwrap().to_owned(),
+ if unit.profile.test {
+ file_stem
+ } else {
+ bin_stem
+ },
+ ))
+ }
+ } else if bin_stem == file_stem {
+ None
+ } else if out_dir.ends_with("examples") || out_dir.parent().unwrap().ends_with("build") {
+ Some((out_dir, bin_stem))
+ } else {
+ None
+ }
+ }
+
+ fn calc_outputs(
+ &self,
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ ) -> CargoResult<Arc<Vec<OutputFile>>> {
+ let out_dir = self.out_dir(unit);
+ let file_stem = self.file_stem(unit);
+ let link_stem = self.link_stem(unit);
+ let info = if unit.target.for_host() {
+ &cx.host_info
+ } else {
+ &cx.target_info
+ };
+
+ let mut ret = Vec::new();
+ let mut unsupported = Vec::new();
+ {
+ if unit.profile.check {
+ let path = out_dir.join(format!("lib{}.rmeta", file_stem));
+ let hardlink = link_stem
+ .clone()
+ .map(|(ld, ls)| ld.join(format!("lib{}.rmeta", ls)));
+ ret.push(OutputFile {
+ path,
+ hardlink,
+ flavor: FileFlavor::Linkable,
+ });
+ } else {
+ let mut add = |crate_type: &str, flavor: FileFlavor| -> CargoResult<()> {
+ let crate_type = if crate_type == "lib" {
+ "rlib"
+ } else {
+ crate_type
+ };
+ let file_types = info.file_types(
+ crate_type,
+ flavor,
+ unit.target.kind(),
+ cx.target_triple(),
+ )?;
+
+ match file_types {
+ Some(types) => for file_type in types {
+ let path = out_dir.join(file_type.filename(&file_stem));
+ let hardlink = link_stem
+ .as_ref()
+ .map(|&(ref ld, ref ls)| ld.join(file_type.filename(ls)));
+ ret.push(OutputFile {
+ path,
+ hardlink,
+ flavor: file_type.flavor,
+ });
+ },
+ // not supported, don't worry about it
+ None => {
+ unsupported.push(crate_type.to_string());
+ }
+ }
+ Ok(())
+ };
+ match *unit.target.kind() {
+ TargetKind::Bin
+ | TargetKind::CustomBuild
+ | TargetKind::ExampleBin
+ | TargetKind::Bench
+ | TargetKind::Test => {
+ add("bin", FileFlavor::Normal)?;
+ }
+ TargetKind::Lib(..) | TargetKind::ExampleLib(..) if unit.profile.test => {
+ add("bin", FileFlavor::Normal)?;
+ }
+ TargetKind::ExampleLib(ref kinds) | TargetKind::Lib(ref kinds) => {
+ for kind in kinds {
+ add(
+ kind.crate_type(),
+ if kind.linkable() {
+ FileFlavor::Linkable
+ } else {
+ FileFlavor::Normal
+ },
+ )?;
+ }
+ }
+ }
+ }
+ }
+ if ret.is_empty() {
+ if !unsupported.is_empty() {
+ bail!(
+ "cannot produce {} for `{}` as the target `{}` \
+ does not support these crate types",
+ unsupported.join(", "),
+ unit.pkg,
+ cx.target_triple()
+ )
+ }
+ bail!(
+ "cannot compile `{}` as the target `{}` does not \
+ support any of the output crate types",
+ unit.pkg,
+ cx.target_triple()
+ );
+ }
+ info!("Target filenames: {:?}", ret);
+
+ Ok(Arc::new(ret))
+ }
+}
+
+fn metadata_of<'a, 'cfg>(
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ metas: &mut HashMap<Unit<'a>, Option<Metadata>>,
+) -> Option<Metadata> {
+ if !metas.contains_key(unit) {
+ let meta = compute_metadata(unit, cx, metas);
+ metas.insert(*unit, meta);
+ for unit in cx.dep_targets(unit) {
+ metadata_of(&unit, cx, metas);
+ }
+ }
+ metas[unit].clone()
+}
+
+fn compute_metadata<'a, 'cfg>(
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ metas: &mut HashMap<Unit<'a>, Option<Metadata>>,
+) -> Option<Metadata> {
+ // No metadata for dylibs because of a couple issues:
+ // - OSX encodes the dylib name in the executable
+ // - on Windows rustc emits multiple files, and we can't easily link all of them
+ //
+ // No metadata for bins because of an issue:
+ // - wasm32: rustc/emcc encodes the .wasm name in the .js (rust-lang/cargo#4535)
+ //
+ // Two exceptions:
+ // 1) Upstream dependencies (we aren't exporting them and need to resolve name conflicts)
+ // 2) The `__CARGO_DEFAULT_LIB_METADATA` env var
+ //
+ // Note, though, that the compiler's build system at least wants
+ // path dependencies (e.g. libstd) to have hashes in filenames. To account for
+ // that we have an extra hack here which reads the
+ // `__CARGO_DEFAULT_LIB_METADATA` environment variable and creates a
+ // hash in the filename if that's present.
+ //
+ // This environment variable should not be relied on! It's
+ // just here for rustbuild. We need a more principled method of
+ // doing this eventually.
+ let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA");
+ if !(unit.profile.test || unit.profile.check)
+ && (unit.target.is_dylib() || unit.target.is_cdylib()
+ || (unit.target.is_bin() && cx.target_triple().starts_with("wasm32-")))
+ && unit.pkg.package_id().source_id().is_path()
+ && __cargo_default_lib_metadata.is_err()
+ {
+ return None;
+ }
+
+ let mut hasher = SipHasher::new_with_keys(0, 0);
+
+ // Unique metadata per (name, source, version) triple. This'll allow us
+ // to pull crates from anywhere w/o worrying about conflicts
+ unit.pkg
+ .package_id()
+ .stable_hash(cx.ws.root())
+ .hash(&mut hasher);
+
+ // Add package properties which map to environment variables
+ // exposed by Cargo
+ let manifest_metadata = unit.pkg.manifest().metadata();
+ manifest_metadata.authors.hash(&mut hasher);
+ manifest_metadata.description.hash(&mut hasher);
+ manifest_metadata.homepage.hash(&mut hasher);
+
+ // Also mix in enabled features to our metadata. This'll ensure that
+ // when changing feature sets each lib is separately cached.
+ cx.resolve
+ .features_sorted(unit.pkg.package_id())
+ .hash(&mut hasher);
+
+ // Mix in the target-metadata of all the dependencies of this target
+ {
+ let mut deps_metadata = cx.dep_targets(unit)
+ .iter()
+ .map(|dep| metadata_of(dep, cx, metas))
+ .collect::<Vec<_>>();
+ deps_metadata.sort();
+ deps_metadata.hash(&mut hasher);
+ }
+
+ // Throw in the profile we're compiling with. This helps caching
+ // panic=abort and panic=unwind artifacts, additionally with various
+ // settings like debuginfo and whatnot.
+ unit.profile.hash(&mut hasher);
+
+ // Artifacts compiled for the host should have a different metadata
+ // piece than those compiled for the target, so make sure we throw in
+ // the unit's `kind` as well
+ unit.kind.hash(&mut hasher);
+
+ // Finally throw in the target name/kind. This ensures that concurrent
+ // compiles of targets in the same crate don't collide.
+ unit.target.name().hash(&mut hasher);
+ unit.target.kind().hash(&mut hasher);
+
+ if let Ok(rustc) = cx.config.rustc() {
+ rustc.verbose_version.hash(&mut hasher);
+ }
+
+ // Seed the contents of __CARGO_DEFAULT_LIB_METADATA to the hasher if present.
+ // This should be the release channel, to get a different hash for each channel.
+ if let Ok(ref channel) = __cargo_default_lib_metadata {
+ channel.hash(&mut hasher);
+ }
+ Some(Metadata(hasher.finish()))
+}
--- /dev/null
+#![allow(deprecated)]
+
+use std::collections::{HashMap, HashSet};
+use std::env;
+use std::path::{Path, PathBuf};
+use std::str::{self, FromStr};
+use std::sync::Arc;
+
+use jobserver::Client;
+
+use core::{Package, PackageId, PackageSet, Profile, Resolve, Target};
+use core::{Dependency, Profiles, Workspace};
+use util::{internal, profile, Cfg, CfgExpr, Config};
+use util::errors::{CargoResult, CargoResultExt};
+
+use super::TargetConfig;
+use super::custom_build::{self, BuildDeps, BuildScripts, BuildState};
+use super::fingerprint::Fingerprint;
+use super::job_queue::JobQueue;
+use super::layout::Layout;
+use super::links::Links;
+use super::{BuildConfig, Compilation, Executor, Kind};
+
+mod unit_dependencies;
+use self::unit_dependencies::build_unit_dependencies;
+
+mod compilation_files;
+use self::compilation_files::{CompilationFiles, OutputFile};
+pub use self::compilation_files::Metadata;
+
+mod target_info;
+pub use self::target_info::FileFlavor;
+use self::target_info::TargetInfo;
+
+/// All information needed to define a Unit.
+///
+/// A unit is an object that has enough information so that cargo knows how to build it.
+/// For example, if your project has dependencies, then every dependency will be built as a library
+/// unit. If your project is a library, then it will be built as a library unit as well, or if it
+/// is a binary with `main.rs`, then a binary will be output. There are also separate unit types
+/// for `test`ing and `check`ing, amongst others.
+///
+/// The unit also holds information about all possible metadata about the package in `pkg`.
+///
+/// A unit needs to know extra information in addition to the type and root source file. For
+/// example, it needs to know the target architecture (OS, chip arch etc.) and it needs to know
+/// whether you want a debug or release build. There is enough information in this struct to figure
+/// all that out.
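+///
+/// For example, a `cargo build` of a binary project with one dependency will
+/// typically produce a unit for the dependency's library, a unit for the
+/// project's own library (if any), and a unit for the binary itself.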
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct Unit<'a> {
+ /// Information about available targets, which files to include/exclude, etc. Basically stuff in
+ /// `Cargo.toml`.
+ pub pkg: &'a Package,
+ /// Information about the specific target to build, out of the possible targets in `pkg`. Not
+ /// to be confused with the *target triple* (or *target architecture*), which is the platform
+ /// the build is produced for.
+ pub target: &'a Target,
+ /// The profile contains information about *how* the build should be run, including debug
+ /// level, extra args to pass to rustc, etc.
+ pub profile: &'a Profile,
+ /// Whether this compilation unit is for the host or target architecture.
+ ///
+ /// For example, when
+ /// cross compiling and using a custom build script, the build script needs to be compiled for
+ /// the host architecture so the host rustc can use it (when compiling to the target
+ /// architecture).
+ pub kind: Kind,
+}
+
+/// The build context, containing all information about a build task
+pub struct Context<'a, 'cfg: 'a> {
+ /// The workspace the build is for
+ pub ws: &'a Workspace<'cfg>,
+ /// The cargo configuration
+ pub config: &'cfg Config,
+ /// The dependency graph for our build
+ pub resolve: &'a Resolve,
+ /// Information on the compilation output
+ pub compilation: Compilation<'cfg>,
+ pub packages: &'a PackageSet<'cfg>,
+ pub build_state: Arc<BuildState>,
+ pub build_script_overridden: HashSet<(PackageId, Kind)>,
+ pub build_explicit_deps: HashMap<Unit<'a>, BuildDeps>,
+ pub fingerprints: HashMap<Unit<'a>, Arc<Fingerprint>>,
+ pub compiled: HashSet<Unit<'a>>,
+ pub build_config: BuildConfig,
+ pub build_scripts: HashMap<Unit<'a>, Arc<BuildScripts>>,
+ pub links: Links<'a>,
+ pub used_in_plugin: HashSet<Unit<'a>>,
+ pub jobserver: Client,
+
+ target_info: TargetInfo,
+ host_info: TargetInfo,
+ profiles: &'a Profiles,
+ incremental_env: Option<bool>,
+
+ unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
+ files: Option<CompilationFiles<'a, 'cfg>>,
+}
+
+impl<'a, 'cfg> Context<'a, 'cfg> {
+ pub fn new(
+ ws: &'a Workspace<'cfg>,
+ resolve: &'a Resolve,
+ packages: &'a PackageSet<'cfg>,
+ config: &'cfg Config,
+ build_config: BuildConfig,
+ profiles: &'a Profiles,
+ ) -> CargoResult<Context<'a, 'cfg>> {
+ let incremental_env = match env::var("CARGO_INCREMENTAL") {
+ Ok(v) => Some(v == "1"),
+ Err(_) => None,
+ };
+
+ // Load up the jobserver that we'll use to manage our parallelism. This
+ // is the same as the GNU make implementation of a jobserver, and
+ // intentionally so! It's hoped that we can interact with GNU make and
+ // all share the same jobserver.
+ //
+ // Note that if we don't have a jobserver in our environment then we
+ // create our own, and we create it with `n-1` tokens because one token
+ // is ourself, a running process.
+ let jobserver = match config.jobserver_from_env() {
+ Some(c) => c.clone(),
+ None => Client::new(build_config.jobs as usize - 1)
+ .chain_err(|| "failed to create jobserver")?,
+ };
+ let mut cx = Context {
+ ws,
+ resolve,
+ packages,
+ config,
+ target_info: TargetInfo::default(),
+ host_info: TargetInfo::default(),
+ compilation: Compilation::new(config),
+ build_state: Arc::new(BuildState::new(&build_config)),
+ build_config,
+ fingerprints: HashMap::new(),
+ profiles,
+ compiled: HashSet::new(),
+ build_scripts: HashMap::new(),
+ build_explicit_deps: HashMap::new(),
+ links: Links::new(),
+ used_in_plugin: HashSet::new(),
+ incremental_env,
+ jobserver,
+ build_script_overridden: HashSet::new(),
+
+ unit_dependencies: HashMap::new(),
+ files: None,
+ };
+
+ cx.probe_target_info()?;
+ Ok(cx)
+ }
+
+ /// Returns a mapping of the root package plus its immediate dependencies to
+ /// where the compiled libraries are all located.
+ pub fn compile(
+ mut self,
+ units: &[Unit<'a>],
+ export_dir: Option<PathBuf>,
+ exec: &Arc<Executor>,
+ ) -> CargoResult<Compilation<'cfg>> {
+ let mut queue = JobQueue::new(&self);
+ self.prepare_units(export_dir, units)?;
+ self.prepare()?;
+ self.build_used_in_plugin_map(&units)?;
+ custom_build::build_map(&mut self, &units)?;
+
+ for unit in units.iter() {
+ // Build up a list of pending jobs, each of which represent
+ // compiling a particular package. No actual work is executed as
+ // part of this, that's all done next as part of the `execute`
+ // function which will run everything in order with proper
+ // parallelism.
+ super::compile(&mut self, &mut queue, unit, exec)?;
+ }
+
+ // Now that we've figured out everything that we're going to do, do it!
+ queue.execute(&mut self)?;
+
+ for unit in units.iter() {
+ for output in self.outputs(unit)?.iter() {
+ if output.flavor == FileFlavor::DebugInfo {
+ continue;
+ }
+
+ let bindst = match output.hardlink {
+ Some(ref link_dst) => link_dst,
+ None => &output.path,
+ };
+
+ if unit.profile.test {
+ self.compilation.tests.push((
+ unit.pkg.clone(),
+ unit.target.kind().clone(),
+ unit.target.name().to_string(),
+ output.path.clone(),
+ ));
+ } else if unit.target.is_bin() || unit.target.is_example() {
+ self.compilation.binaries.push(bindst.clone());
+ } else if unit.target.is_lib() {
+ let pkgid = unit.pkg.package_id().clone();
+ self.compilation
+ .libraries
+ .entry(pkgid)
+ .or_insert_with(HashSet::new)
+ .insert((unit.target.clone(), output.path.clone()));
+ }
+ }
+
+ for dep in self.dep_targets(unit).iter() {
+ if !unit.target.is_lib() {
+ continue;
+ }
+
+ if dep.profile.run_custom_build {
+ let out_dir = self.files().build_script_out_dir(dep).display().to_string();
+ self.compilation
+ .extra_env
+ .entry(dep.pkg.package_id().clone())
+ .or_insert_with(Vec::new)
+ .push(("OUT_DIR".to_string(), out_dir));
+ }
+
+ if !dep.target.is_lib() {
+ continue;
+ }
+ if dep.profile.doc {
+ continue;
+ }
+
+ let outputs = self.outputs(dep)?;
+ self.compilation
+ .libraries
+ .entry(unit.pkg.package_id().clone())
+ .or_insert_with(HashSet::new)
+ .extend(
+ outputs
+ .iter()
+ .map(|output| (dep.target.clone(), output.path.clone())),
+ );
+ }
+
+ let feats = self.resolve.features(unit.pkg.package_id());
+ if !feats.is_empty() {
+ self.compilation
+ .cfgs
+ .entry(unit.pkg.package_id().clone())
+ .or_insert_with(|| {
+ feats
+ .iter()
+ .map(|feat| format!("feature=\"{}\"", feat))
+ .collect()
+ });
+ }
+ let rustdocflags = self.rustdocflags_args(unit)?;
+ if !rustdocflags.is_empty() {
+ self.compilation
+ .rustdocflags
+ .entry(unit.pkg.package_id().clone())
+ .or_insert(rustdocflags);
+ }
+
+ super::output_depinfo(&mut self, unit)?;
+ }
+
+ for (&(ref pkg, _), output) in self.build_state.outputs.lock().unwrap().iter() {
+ self.compilation
+ .cfgs
+ .entry(pkg.clone())
+ .or_insert_with(HashSet::new)
+ .extend(output.cfgs.iter().cloned());
+
+ self.compilation
+ .extra_env
+ .entry(pkg.clone())
+ .or_insert_with(Vec::new)
+ .extend(output.env.iter().cloned());
+
+ for dir in output.library_paths.iter() {
+ self.compilation.native_dirs.insert(dir.clone());
+ }
+ }
+ self.compilation.target = self.target_triple().to_string();
+ Ok(self.compilation)
+ }
+
+ pub fn prepare_units(
+ &mut self,
+ export_dir: Option<PathBuf>,
+ units: &[Unit<'a>],
+ ) -> CargoResult<()> {
+ let dest = if self.build_config.release {
+ "release"
+ } else {
+ "debug"
+ };
+ let host_layout = Layout::new(self.ws, None, dest)?;
+ let target_layout = match self.build_config.requested_target.as_ref() {
+ Some(target) => Some(Layout::new(self.ws, Some(target), dest)?),
+ None => None,
+ };
+
+ let deps = build_unit_dependencies(units, &self)?;
+ self.unit_dependencies = deps;
+ let files = CompilationFiles::new(
+ units,
+ host_layout,
+ target_layout,
+ export_dir,
+ self.ws,
+ &self,
+ );
+ self.files = Some(files);
+ Ok(())
+ }
+
+ /// Prepare this context, ensuring that all filesystem directories are in
+ /// place.
+ pub fn prepare(&mut self) -> CargoResult<()> {
+ let _p = profile::start("preparing layout");
+
+ self.files_mut()
+ .host
+ .prepare()
+ .chain_err(|| internal("couldn't prepare build directories"))?;
+ if let Some(ref mut target) = self.files.as_mut().unwrap().target {
+ target
+ .prepare()
+ .chain_err(|| internal("couldn't prepare build directories"))?;
+ }
+
+ self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf();
+
+ let files = self.files.as_ref().unwrap();
+ let layout = files.target.as_ref().unwrap_or(&files.host);
+ self.compilation.root_output = layout.dest().to_path_buf();
+ self.compilation.deps_output = layout.deps().to_path_buf();
+ Ok(())
+ }
+
+ /// Ensure that we've collected all target-specific information to compile
+ /// all the units mentioned in `units`.
+ fn probe_target_info(&mut self) -> CargoResult<()> {
+ let _p = profile::start("Context::probe_target_info");
+ debug!("probe_target_info");
+ let host_target_same = match self.requested_target() {
+ Some(s) if s != self.config.rustc()?.host => false,
+ _ => true,
+ };
+
+ self.host_info = TargetInfo::new(self, Kind::Host)?;
+ self.target_info = if host_target_same {
+ self.host_info.clone()
+ } else {
+ TargetInfo::new(self, Kind::Target)?
+ };
+ self.compilation.host_dylib_path = self.host_info.sysroot_libdir.clone();
+ self.compilation.target_dylib_path = self.target_info.sysroot_libdir.clone();
+ Ok(())
+ }
+
+ /// Builds up the `used_in_plugin` internal to this context from the list of
+ /// top-level units.
+ ///
+ /// This will recursively walk `units` and all of their dependencies to
+ /// determine which crates are going to be used in plugins or not.
+ pub fn build_used_in_plugin_map(&mut self, units: &[Unit<'a>]) -> CargoResult<()> {
+ let mut visited = HashSet::new();
+ for unit in units {
+ self.walk_used_in_plugin_map(unit, unit.target.for_host(), &mut visited)?;
+ }
+ Ok(())
+ }
+
+ fn walk_used_in_plugin_map(
+ &mut self,
+ unit: &Unit<'a>,
+ is_plugin: bool,
+ visited: &mut HashSet<(Unit<'a>, bool)>,
+ ) -> CargoResult<()> {
+ if !visited.insert((*unit, is_plugin)) {
+ return Ok(());
+ }
+ if is_plugin {
+ self.used_in_plugin.insert(*unit);
+ }
+ for unit in self.dep_targets(unit) {
+ self.walk_used_in_plugin_map(&unit, is_plugin || unit.target.for_host(), visited)?;
+ }
+ Ok(())
+ }
+
+ pub fn files(&self) -> &CompilationFiles<'a, 'cfg> {
+ self.files.as_ref().unwrap()
+ }
+
+ fn files_mut(&mut self) -> &mut CompilationFiles<'a, 'cfg> {
+ self.files.as_mut().unwrap()
+ }
+
+ /// Return the host triple for this context
+ pub fn host_triple(&self) -> &str {
+ &self.build_config.host_triple
+ }
+
+ /// Return the target triple which this context is targeting.
+ pub fn target_triple(&self) -> &str {
+ self.requested_target()
+ .unwrap_or_else(|| self.host_triple())
+ }
+
+ /// Requested (not actual) target for the build
+ pub fn requested_target(&self) -> Option<&str> {
+ self.build_config.requested_target.as_ref().map(|s| &s[..])
+ }
+
+ /// Return the list of `OutputFile`s that the given unit will generate.
+ ///
+ /// - `path`: the filename rustc compiles to (often with a metadata suffix)
+ /// - `hardlink`: optional file to link/copy the result to (without the metadata suffix)
+ /// - `flavor`: what kind of file it is (normal, linkable, or debug info)
+ pub fn outputs(&mut self, unit: &Unit<'a>) -> CargoResult<Arc<Vec<OutputFile>>> {
+ self.files.as_ref().unwrap().outputs(unit, self)
+ }
+
+ /// For a package, return all targets which are registered as dependencies
+ /// for that package.
+ // TODO: this ideally should be `-> &[Unit<'a>]`
+ pub fn dep_targets(&self, unit: &Unit<'a>) -> Vec<Unit<'a>> {
+ // If this build script's execution has been overridden then we don't
+ // actually depend on anything, we've reached the end of the dependency
+ // chain as we've got all the info we're gonna get.
+ //
+ // Note there's a subtlety about this piece of code! The
+ // `build_script_overridden` map here is populated in
+ // `custom_build::build_map` which you need to call before inspecting
+ // dependencies. However, that code itself calls this method and
+ // gets a full pre-filtered set of dependencies. This is not super
+ // obvious or clear, but it does work at the moment.
+ if unit.profile.run_custom_build {
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+ if self.build_script_overridden.contains(&key) {
+ return Vec::new();
+ }
+ }
+ self.unit_dependencies[unit].clone()
+ }
+
+ fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
+ // If this dependency is only available for certain platforms,
+ // make sure we're only enabling it for that platform.
+ let platform = match dep.platform() {
+ Some(p) => p,
+ None => return true,
+ };
+ let (name, info) = match kind {
+ Kind::Host => (self.host_triple(), &self.host_info),
+ Kind::Target => (self.target_triple(), &self.target_info),
+ };
+ platform.matches(name, info.cfg())
+ }
+
+ /// Gets a package for the given package id.
+ pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
+ self.packages.get(id)
+ }
+
+ /// Get the user-specified linker for a particular host or target
+ pub fn linker(&self, kind: Kind) -> Option<&Path> {
+ self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
+ }
+
+ /// Get the user-specified `ar` program for a particular host or target
+ pub fn ar(&self, kind: Kind) -> Option<&Path> {
+ self.target_config(kind).ar.as_ref().map(|s| s.as_ref())
+ }
+
+ /// Get the list of cfg printed out from the compiler for the specified kind
+ pub fn cfg(&self, kind: Kind) -> &[Cfg] {
+ let info = match kind {
+ Kind::Host => &self.host_info,
+ Kind::Target => &self.target_info,
+ };
+ info.cfg().unwrap_or(&[])
+ }
+
+ /// Get the target configuration for a particular host or target
+ fn target_config(&self, kind: Kind) -> &TargetConfig {
+ match kind {
+ Kind::Host => &self.build_config.host,
+ Kind::Target => &self.build_config.target,
+ }
+ }
+
+ /// Number of jobs specified for this build
+ pub fn jobs(&self) -> u32 {
+ self.build_config.jobs
+ }
+
+ pub fn lib_profile(&self) -> &'a Profile {
+ let (normal, test) = if self.build_config.release {
+ (&self.profiles.release, &self.profiles.bench_deps)
+ } else {
+ (&self.profiles.dev, &self.profiles.test_deps)
+ };
+ if self.build_config.test {
+ test
+ } else {
+ normal
+ }
+ }
+
+ pub fn build_script_profile(&self, _pkg: &PackageId) -> &'a Profile {
+ // TODO: should build scripts always be built with the same library
+ // profile? How is this controlled at the CLI layer?
+ self.lib_profile()
+ }
+
+ pub fn incremental_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+ // There's a number of ways to configure incremental compilation right
+ // now. In order of descending priority (first is highest priority) we
+ // have:
+ //
+ // * `CARGO_INCREMENTAL` - this is blanket used unconditionally to turn
+ // on/off incremental compilation for any cargo subcommand. We'll
+ // respect this if set.
+ // * `build.incremental` - this blanket key in `.cargo/config` can
+ // configure, globally for a system, whether incremental compilation is
+ // enabled. Note that setting this to `true` will not actually affect
+ // all builds though. For example a `true` value doesn't enable
+ // release incremental builds, only dev incremental builds. This can
+ // be useful to globally disable incremental compilation like
+ // `CARGO_INCREMENTAL`.
+ // * `profile.dev.incremental` - in `Cargo.toml` specific profiles can
+ // be configured to enable/disable incremental compilation. This can
+ // be used primarily to disable incremental compilation when it is buggy
+ // for a project.
+ // * Finally, each profile has a default for whether it will enable
+ // incremental compilation or not. Primarily development profiles
+ // have it enabled by default while release profiles have it disabled
+ // by default.
+ let global_cfg = self.config.get_bool("build.incremental")?.map(|c| c.val);
+ let incremental = match (self.incremental_env, global_cfg, unit.profile.incremental) {
+ (Some(v), _, _) => v,
+ (None, Some(false), _) => false,
+ (None, _, other) => other,
+ };
+
+ if !incremental {
+ return Ok(Vec::new());
+ }
+
+ // Only enable incremental compilation for sources the user can
+ // modify (aka path sources). For things that change infrequently,
+ // non-incremental builds yield better performance in the compiler
+ // itself (aka crates.io / git dependencies)
+ //
+ // (see also https://github.com/rust-lang/cargo/issues/3972)
+ if !unit.pkg.package_id().source_id().is_path() {
+ return Ok(Vec::new());
+ }
+
+ let dir = self.files().layout(unit.kind).incremental().display();
+ Ok(vec!["-C".to_string(), format!("incremental={}", dir)])
+ }
+
+ pub fn rustflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+ env_args(
+ self.config,
+ &self.build_config,
+ self.info(&unit.kind).cfg(),
+ unit.kind,
+ "RUSTFLAGS",
+ )
+ }
+
+ pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+ env_args(
+ self.config,
+ &self.build_config,
+ self.info(&unit.kind).cfg(),
+ unit.kind,
+ "RUSTDOCFLAGS",
+ )
+ }
+
+ pub fn show_warnings(&self, pkg: &PackageId) -> bool {
+ pkg.source_id().is_path() || self.config.extra_verbose()
+ }
+
+ fn info(&self, kind: &Kind) -> &TargetInfo {
+ match *kind {
+ Kind::Host => &self.host_info,
+ Kind::Target => &self.target_info,
+ }
+ }
+}
+
+/// Acquire extra flags to pass to the compiler from various locations.
+///
+/// The locations are:
+///
+/// - the `RUSTFLAGS` environment variable
+///
+/// then if this was not found
+///
+/// - `target.*.rustflags` from the config (`.cargo/config`)
+/// - `target.cfg(..).rustflags` from the config
+///
+/// then if neither of these were found
+///
+/// - `build.rustflags` from the config
+///
+/// Note that if a `target` is specified, no args will be passed to host code (plugins, build
+/// scripts, ...), even if it is the same as the target.
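+///
+/// For illustration, the config-based sources might look like this in
+/// `.cargo/config` (triple and flag values hypothetical):
+///
+/// ```toml
+/// [target.x86_64-unknown-linux-gnu]
+/// rustflags = ["-C", "target-cpu=native"]
+///
+/// [build]
+/// rustflags = ["-D", "warnings"]
+/// ```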
+fn env_args(
+ config: &Config,
+ build_config: &BuildConfig,
+ target_cfg: Option<&[Cfg]>,
+ kind: Kind,
+ name: &str,
+) -> CargoResult<Vec<String>> {
+ // We *want* to apply RUSTFLAGS only to builds for the
+ // requested target architecture, and not to things like build
+ // scripts and plugins, which may be for an entirely different
+ // architecture. Cargo's present architecture makes it quite
+ // hard to only apply flags to things that are not build
+ // scripts and plugins though, so we do something more hacky
+ // instead to avoid applying the same RUSTFLAGS to multiple target
+ // arches:
+ //
+ // 1) If --target is not specified we just apply RUSTFLAGS to
+ // all builds; they are all going to have the same target.
+ //
+ // 2) If --target *is* specified then we only apply RUSTFLAGS
+ // to compilation units with the Target kind, which indicates
+ // it was chosen by the --target flag.
+ //
+ // This means that, e.g., even if the specified --target is the
+ // same as the host, build scripts and plugins won't get
+ // RUSTFLAGS.
+ let compiling_with_target = build_config.requested_target.is_some();
+ let is_target_kind = kind == Kind::Target;
+
+ if compiling_with_target && !is_target_kind {
+ // This is probably a build script or plugin and we're
+ // compiling with --target. In this scenario there are
+ // no rustflags we can apply.
+ return Ok(Vec::new());
+ }
+
+ // First try RUSTFLAGS from the environment
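+ // (e.g. `RUSTFLAGS="-C target-cpu=native -D warnings"` becomes
+ // `["-C", "target-cpu=native", "-D", "warnings"]`)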
+ if let Ok(a) = env::var(name) {
+ let args = a.split(' ')
+ .map(str::trim)
+ .filter(|s| !s.is_empty())
+ .map(str::to_string);
+ return Ok(args.collect());
+ }
+
+ let mut rustflags = Vec::new();
+
+ let name = name.chars()
+ .flat_map(|c| c.to_lowercase())
+ .collect::<String>();
+ // Then the target.*.rustflags value...
+ let target = build_config
+ .requested_target
+ .as_ref()
+ .unwrap_or(&build_config.host_triple);
+ let key = format!("target.{}.{}", target, name);
+ if let Some(args) = config.get_list_or_split_string(&key)? {
+ let args = args.val.into_iter();
+ rustflags.extend(args);
+ }
+ // ...including target.'cfg(...)'.rustflags
+ if let Some(target_cfg) = target_cfg {
+ if let Some(table) = config.get_table("target")? {
+ let cfgs = table.val.keys().filter_map(|t| {
+ if t.starts_with("cfg(") && t.ends_with(')') {
+ let cfg = &t[4..t.len() - 1];
+ CfgExpr::from_str(cfg).ok().and_then(|c| {
+ if c.matches(target_cfg) {
+ Some(t)
+ } else {
+ None
+ }
+ })
+ } else {
+ None
+ }
+ });
+
+ // Note that we may have multiple matching `[target]` sections and
+ // because we're passing flags to the compiler this can affect
+ // cargo's caching and whether it rebuilds. Ensure a deterministic
+ // ordering through sorting for now. We may perhaps one day wish to
+ // ensure a deterministic ordering via the order keys were defined
+ // in files perhaps.
+ let mut cfgs = cfgs.collect::<Vec<_>>();
+ cfgs.sort();
+
+ for n in cfgs {
+ let key = format!("target.{}.{}", n, name);
+ if let Some(args) = config.get_list_or_split_string(&key)? {
+ let args = args.val.into_iter();
+ rustflags.extend(args);
+ }
+ }
+ }
+ }
+
+ if !rustflags.is_empty() {
+ return Ok(rustflags);
+ }
+
+ // Then the build.rustflags value
+ let key = format!("build.{}", name);
+ if let Some(args) = config.get_list_or_split_string(&key)? {
+ let args = args.val.into_iter();
+ return Ok(args.collect());
+ }
+
+ Ok(Vec::new())
+}
--- /dev/null
+use std::cell::RefCell;
+use std::collections::hash_map::{Entry, HashMap};
+use std::path::PathBuf;
+use std::str::{self, FromStr};
+
+use super::{env_args, Context};
+use util::{CargoResult, CargoResultExt, Cfg, ProcessBuilder};
+use core::TargetKind;
+use super::Kind;
+
+#[derive(Clone, Default)]
+pub struct TargetInfo {
+ crate_type_process: Option<ProcessBuilder>,
+ crate_types: RefCell<HashMap<String, Option<(String, String)>>>,
+ cfg: Option<Vec<Cfg>>,
+ pub sysroot_libdir: Option<PathBuf>,
+}
+
+/// Type of each file generated by a Unit.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum FileFlavor {
+ /// Not a special file type.
+ Normal,
+ /// It is something you can link against (e.g. a library)
+ Linkable,
+ /// It is a piece of external debug information (e.g. *.dSYM and *.pdb)
+ DebugInfo,
+}
+
+pub struct FileType {
+ pub flavor: FileFlavor,
+ suffix: String,
+ prefix: String,
+ // A wasm bin target will generate two files in `deps`, such as
+ // "web-stuff.js" and "web_stuff.wasm". Note the different usages of
+ // "-" and "_". `should_replace_hyphens` indicates that we need to
+ // convert the stem "web-stuff" to "web_stuff" so we won't miss
+ // "web_stuff.wasm".
+ should_replace_hyphens: bool,
+}
+
+impl FileType {
+ pub fn filename(&self, stem: &str) -> String {
+ let stem = if self.should_replace_hyphens {
+ stem.replace("-", "_")
+ } else {
+ stem.to_string()
+ };
+ format!("{}{}{}", self.prefix, stem, self.suffix)
+ }
+}
+
+impl TargetInfo {
+ pub fn new(cx: &Context, kind: Kind) -> CargoResult<TargetInfo> {
+ let rustflags = env_args(cx.config, &cx.build_config, None, kind, "RUSTFLAGS")?;
+ let mut process = cx.config.rustc()?.process();
+ process
+ .arg("-")
+ .arg("--crate-name")
+ .arg("___")
+ .arg("--print=file-names")
+ .args(&rustflags)
+ .env_remove("RUST_LOG");
+
+ if kind == Kind::Target {
+ process.arg("--target").arg(&cx.target_triple());
+ }
+
+ let crate_type_process = process.clone();
+ const KNOWN_CRATE_TYPES: &[&str] =
+ &["bin", "rlib", "dylib", "cdylib", "staticlib", "proc-macro"];
+ for crate_type in KNOWN_CRATE_TYPES.iter() {
+ process.arg("--crate-type").arg(crate_type);
+ }
+
+ let mut with_cfg = process.clone();
+ with_cfg.arg("--print=sysroot");
+ with_cfg.arg("--print=cfg");
+
+ let mut has_cfg_and_sysroot = true;
+ let output = with_cfg
+ .exec_with_output()
+ .or_else(|_| {
+ has_cfg_and_sysroot = false;
+ process.exec_with_output()
+ })
+ .chain_err(|| "failed to run `rustc` to learn about target-specific information")?;
+
+ let error = str::from_utf8(&output.stderr).unwrap();
+ let output = str::from_utf8(&output.stdout).unwrap();
+ let mut lines = output.lines();
+ let mut map = HashMap::new();
+ for crate_type in KNOWN_CRATE_TYPES {
+ let out = parse_crate_type(crate_type, error, &mut lines)?;
+ map.insert(crate_type.to_string(), out);
+ }
+
+ let mut sysroot_libdir = None;
+ if has_cfg_and_sysroot {
+ let line = match lines.next() {
+ Some(line) => line,
+ None => bail!(
+ "output of --print=sysroot missing when learning about \
+ target-specific information from rustc"
+ ),
+ };
+ let mut rustlib = PathBuf::from(line);
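+ // Host dylibs live next to the compiler itself (`bin` on Windows,
+ // `lib` elsewhere), while target dylibs live under
+ // `<sysroot>/lib/rustlib/<triple>/lib`.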
+ if kind == Kind::Host {
+ if cfg!(windows) {
+ rustlib.push("bin");
+ } else {
+ rustlib.push("lib");
+ }
+ sysroot_libdir = Some(rustlib);
+ } else {
+ rustlib.push("lib");
+ rustlib.push("rustlib");
+ rustlib.push(cx.target_triple());
+ rustlib.push("lib");
+ sysroot_libdir = Some(rustlib);
+ }
+ }
+
+ let cfg = if has_cfg_and_sysroot {
+ Some(lines.map(Cfg::from_str).collect::<CargoResult<_>>()?)
+ } else {
+ None
+ };
+
+ Ok(TargetInfo {
+ crate_type_process: Some(crate_type_process),
+ crate_types: RefCell::new(map),
+ cfg,
+ sysroot_libdir,
+ })
+ }
+
+ pub fn cfg(&self) -> Option<&[Cfg]> {
+ self.cfg.as_ref().map(|v| v.as_ref())
+ }
+
+ pub fn file_types(
+ &self,
+ crate_type: &str,
+ flavor: FileFlavor,
+ kind: &TargetKind,
+ target_triple: &str,
+ ) -> CargoResult<Option<Vec<FileType>>> {
+ let mut crate_types = self.crate_types.borrow_mut();
+ let entry = crate_types.entry(crate_type.to_string());
+ let crate_type_info = match entry {
+ Entry::Occupied(o) => &*o.into_mut(),
+ Entry::Vacant(v) => {
+ let value = self.discover_crate_type(v.key())?;
+ &*v.insert(value)
+ }
+ };
+ let (prefix, suffix) = match *crate_type_info {
+ Some((ref prefix, ref suffix)) => (prefix, suffix),
+ None => return Ok(None),
+ };
+ let mut ret = vec![
+ FileType {
+ suffix: suffix.clone(),
+ prefix: prefix.clone(),
+ flavor,
+ should_replace_hyphens: false,
+ },
+ ];
+
+ // rust-lang/cargo#4500
+ if target_triple.ends_with("pc-windows-msvc") && crate_type.ends_with("dylib")
+ && suffix == ".dll"
+ {
+ ret.push(FileType {
+ suffix: ".dll.lib".to_string(),
+ prefix: prefix.clone(),
+ flavor: FileFlavor::Normal,
+ should_replace_hyphens: false,
+ })
+ }
+
+ // rust-lang/cargo#4535
+ if target_triple.starts_with("wasm32-") && crate_type == "bin" && suffix == ".js" {
+ ret.push(FileType {
+ suffix: ".wasm".to_string(),
+ prefix: prefix.clone(),
+ flavor: FileFlavor::Normal,
+ should_replace_hyphens: true,
+ })
+ }
+
+ // rust-lang/cargo#4490, rust-lang/cargo#4960
+ // - only uplift debuginfo for binaries.
+ // tests are run directly from target/debug/deps/
+ // and examples are inside target/debug/examples/ which already have symbols next to them
+ // so no need to do anything.
+ if *kind == TargetKind::Bin {
+ if target_triple.contains("-apple-") {
+ ret.push(FileType {
+ suffix: ".dSYM".to_string(),
+ prefix: prefix.clone(),
+ flavor: FileFlavor::DebugInfo,
+ should_replace_hyphens: false,
+ })
+ } else if target_triple.ends_with("-msvc") {
+ ret.push(FileType {
+ suffix: ".pdb".to_string(),
+ prefix: prefix.clone(),
+ flavor: FileFlavor::DebugInfo,
+ should_replace_hyphens: false,
+ })
+ }
+ }
+
+ Ok(Some(ret))
+ }
+
+ fn discover_crate_type(&self, crate_type: &str) -> CargoResult<Option<(String, String)>> {
+ let mut process = self.crate_type_process.clone().unwrap();
+
+ process.arg("--crate-type").arg(crate_type);
+
+ let output = process.exec_with_output().chain_err(|| {
+ format!(
+ "failed to run `rustc` to learn about \
+ crate-type {} information",
+ crate_type
+ )
+ })?;
+
+ let error = str::from_utf8(&output.stderr).unwrap();
+ let output = str::from_utf8(&output.stdout).unwrap();
+ Ok(parse_crate_type(crate_type, error, &mut output.lines())?)
+ }
+}
+
+/// Takes rustc output (using specialized command line args), and calculates the file prefix and
+/// suffix for the given crate type, or returns `None` if the type is not supported (e.g. for a
+/// Rust library like libcargo.rlib, prefix = "lib", suffix = ".rlib").
+///
+/// The caller needs to ensure that the lines object is at the correct line for the given crate
+/// type: this is not checked.
+// This function cannot handle more than one file per type (with wasm32-unknown-emscripten, there
+// are two files for bin: `.wasm` and `.js`).
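+// For example, for crate-type `rlib` the compiler prints a line like
+// `lib___.rlib` (using the dummy crate name `___`), which splits into
+// prefix `"lib"` and suffix `".rlib"`.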
+fn parse_crate_type(
+ crate_type: &str,
+ error: &str,
+ lines: &mut str::Lines,
+) -> CargoResult<Option<(String, String)>> {
+ let not_supported = error.lines().any(|line| {
+ (line.contains("unsupported crate type") || line.contains("unknown crate type"))
+ && line.contains(crate_type)
+ });
+ if not_supported {
+ return Ok(None);
+ }
+ let line = match lines.next() {
+ Some(line) => line,
+ None => bail!(
+ "malformed output when learning about \
+ crate-type {} information",
+ crate_type
+ ),
+ };
+ let mut parts = line.trim().split("___");
+ let prefix = parts.next().unwrap();
+ let suffix = match parts.next() {
+ Some(part) => part,
+ None => bail!(
+ "output of --print=file-names has changed in \
+ the compiler, cannot parse"
+ ),
+ };
+
+ Ok(Some((prefix.to_string(), suffix.to_string())))
+}
--- /dev/null
+//! Constructs the dependency graph for compilation.
+//!
+//! Rust code is typically organized as a set of Cargo packages. The
+//! dependencies between the packages themselves are stored in the
+//! `Resolve` struct. However, we can't use that information as is for
+//! compilation! A package typically contains several targets, or crates,
+//! and these targets have inter-dependencies. For example, you need to
+//! compile the `lib` target before the `bin` one, and you need to compile
+//! `build.rs` before either of those.
+//!
+//! So, we need to lower the `Resolve`, which specifies dependencies between
+//! *packages*, to a graph of dependencies between their *targets*, and this
+//! is exactly what this module is doing! Well, almost exactly: another
+//! complication is that we might want to compile the same target several times
+//! (for example, with and without tests), so we actually build a dependency
+//! graph of `Unit`s, which capture these properties.
+
+use super::{Context, Kind, Unit};
+use std::collections::HashMap;
+use CargoResult;
+use core::dependency::Kind as DepKind;
+use core::Target;
+use core::Profile;
+
+pub fn build_unit_dependencies<'a, 'cfg>(
+ roots: &[Unit<'a>],
+ cx: &Context<'a, 'cfg>,
+) -> CargoResult<HashMap<Unit<'a>, Vec<Unit<'a>>>> {
+ let mut deps = HashMap::new();
+ for unit in roots.iter() {
+ deps_of(unit, cx, &mut deps)?;
+ }
+
+ Ok(deps)
+}
+
+fn deps_of<'a, 'b, 'cfg>(
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ deps: &'b mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
+) -> CargoResult<&'b [Unit<'a>]> {
+ if !deps.contains_key(unit) {
+ let unit_deps = compute_deps(unit, cx, deps)?;
+ deps.insert(*unit, unit_deps.clone());
+ for unit in unit_deps {
+ deps_of(&unit, cx, deps)?;
+ }
+ }
+ Ok(deps[unit].as_ref())
+}
+
+/// For a package, return all targets which are registered as dependencies
+/// for that package.
+fn compute_deps<'a, 'b, 'cfg>(
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ deps: &'b mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
+) -> CargoResult<Vec<Unit<'a>>> {
+ if unit.profile.run_custom_build {
+ return compute_deps_custom_build(unit, cx, deps);
+ } else if unit.profile.doc && !unit.profile.test {
+ return compute_deps_doc(unit, cx);
+ }
+
+ let id = unit.pkg.package_id();
+ let deps = cx.resolve.deps(id);
+ let mut ret = deps.filter(|dep| {
+ unit.pkg
+ .dependencies()
+ .iter()
+ .filter(|d| d.name() == dep.name() && d.version_req().matches(dep.version()))
+ .any(|d| {
+ // If this target is a build command, then we only want build
+ // dependencies, otherwise we want everything *other than* build
+ // dependencies.
+ if unit.target.is_custom_build() != d.is_build() {
+ return false;
+ }
+
+ // If this dependency is *not* a transitive dependency, then it
+ // only applies to test/example targets
+ if !d.is_transitive() && !unit.target.is_test() && !unit.target.is_example()
+ && !unit.profile.test
+ {
+ return false;
+ }
+
+ // If this dependency is only available for certain platforms,
+ // make sure we're only enabling it for that platform.
+ if !cx.dep_platform_activated(d, unit.kind) {
+ return false;
+ }
+
+ // If the dependency is optional, then we're only activating it
+ // if the corresponding feature was activated
+ if d.is_optional() && !cx.resolve.features(id).contains(&*d.name()) {
+ return false;
+ }
+
+ // If we've gotten past all that, then this dependency is
+ // actually used!
+ true
+ })
+ }).filter_map(|id| match cx.get_package(id) {
+ Ok(pkg) => pkg.targets().iter().find(|t| t.is_lib()).map(|t| {
+ let unit = Unit {
+ pkg,
+ target: t,
+ profile: lib_or_check_profile(unit, t, cx),
+ kind: unit.kind.for_target(t),
+ };
+ Ok(unit)
+ }),
+ Err(e) => Some(Err(e)),
+ })
+ .collect::<CargoResult<Vec<_>>>()?;
+
+ // If this target is a build script, then what we've collected so far is
+ // all we need. If this isn't a build script, then it depends on the
+ // build script if there is one.
+ if unit.target.is_custom_build() {
+ return Ok(ret);
+ }
+ ret.extend(dep_build_script(unit, cx));
+
+ // If this target is a binary, test, example, etc, then it depends on
+ // the library of the same package. The call to `resolve.deps` above
+ // didn't include `pkg` in the return values, so we need to special case
+ // it here and see if we need to push `(pkg, pkg_lib_target)`.
+ if unit.target.is_lib() && !unit.profile.doc {
+ return Ok(ret);
+ }
+ ret.extend(maybe_lib(unit, cx));
+
+ // Integration tests/benchmarks require binaries to be built
+ if unit.profile.test && (unit.target.is_test() || unit.target.is_bench()) {
+ ret.extend(
+ unit.pkg
+ .targets()
+ .iter()
+ .filter(|t| {
+ let no_required_features = Vec::new();
+
+ t.is_bin() &&
+ // Skip binaries with required features that have not been selected.
+ t.required_features().unwrap_or(&no_required_features).iter().all(|f| {
+ cx.resolve.features(id).contains(f)
+ })
+ })
+ .map(|t| Unit {
+ pkg: unit.pkg,
+ target: t,
+ profile: lib_or_check_profile(unit, t, cx),
+ kind: unit.kind.for_target(t),
+ }),
+ );
+ }
+ Ok(ret)
+}
+
+/// Returns the dependencies needed to run a build script.
+///
+/// The `unit` provided must represent an execution of a build script, and
+/// the returned set of units must all be run before `unit` is run.
+fn compute_deps_custom_build<'a, 'cfg>(
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+ deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
+) -> CargoResult<Vec<Unit<'a>>> {
+ // When not overridden, then the dependencies to run a build script are:
+ //
+ // 1. Compiling the build script itself
+ // 2. For each immediate dependency of our package which has a `links`
+ // key, the execution of that build script.
+ let not_custom_build = unit.pkg
+ .targets()
+ .iter()
+ .find(|t| !t.is_custom_build())
+ .unwrap();
+ let tmp = Unit {
+ target: not_custom_build,
+ profile: &cx.profiles.dev,
+ ..*unit
+ };
+ let deps = deps_of(&tmp, cx, deps)?;
+ Ok(deps.iter()
+ .filter_map(|unit| {
+ if !unit.target.linkable() || unit.pkg.manifest().links().is_none() {
+ return None;
+ }
+ dep_build_script(unit, cx)
+ })
+ .chain(Some(Unit {
+ profile: cx.build_script_profile(unit.pkg.package_id()),
+ kind: Kind::Host, // build scripts always compiled for the host
+ ..*unit
+ }))
+ .collect())
+}
+
+/// Returns the dependencies necessary to document a package
+fn compute_deps_doc<'a, 'cfg>(
+ unit: &Unit<'a>,
+ cx: &Context<'a, 'cfg>,
+) -> CargoResult<Vec<Unit<'a>>> {
+ let deps = cx.resolve
+ .deps(unit.pkg.package_id())
+ .filter(|dep| {
+ unit.pkg
+ .dependencies()
+ .iter()
+ .filter(|d| d.name() == dep.name())
+ .any(|dep| match dep.kind() {
+ DepKind::Normal => cx.dep_platform_activated(dep, unit.kind),
+ _ => false,
+ })
+ })
+ .map(|dep| cx.get_package(dep));
+
+ // To document a library, we depend on dependencies actually being
+ // built. If we're documenting *all* libraries, then we also depend on
+ // the documentation of the library being built.
+ let mut ret = Vec::new();
+ for dep in deps {
+ let dep = dep?;
+ let lib = match dep.targets().iter().find(|t| t.is_lib()) {
+ Some(lib) => lib,
+ None => continue,
+ };
+ ret.push(Unit {
+ pkg: dep,
+ target: lib,
+ profile: lib_or_check_profile(unit, lib, cx),
+ kind: unit.kind.for_target(lib),
+ });
+ if cx.build_config.doc_all {
+ ret.push(Unit {
+ pkg: dep,
+ target: lib,
+ profile: &cx.profiles.doc,
+ kind: unit.kind.for_target(lib),
+ });
+ }
+ }
+
+ // Be sure to build/run the build script for documented libraries as well.
+ ret.extend(dep_build_script(unit, cx));
+
+ // If we document a binary, we need the library available
+ if unit.target.is_bin() {
+ ret.extend(maybe_lib(unit, cx));
+ }
+ Ok(ret)
+}
+
+fn maybe_lib<'a, 'cfg>(unit: &Unit<'a>, cx: &Context<'a, 'cfg>) -> Option<Unit<'a>> {
+ unit.pkg
+ .targets()
+ .iter()
+ .find(|t| t.linkable())
+ .map(|t| Unit {
+ pkg: unit.pkg,
+ target: t,
+ profile: lib_or_check_profile(unit, t, cx),
+ kind: unit.kind.for_target(t),
+ })
+}
+
+/// If a build script is scheduled to be run for the package specified by
+/// `unit`, this function will return the unit to run that build script.
+///
+/// Overriding a build script simply means that the running of the build
+/// script itself doesn't have any dependencies, so even in that case a unit
+/// of work is still returned. `None` is only returned if the package has no
+/// build script.
+fn dep_build_script<'a, 'cfg>(unit: &Unit<'a>, cx: &Context<'a, 'cfg>) -> Option<Unit<'a>> {
+ unit.pkg
+ .targets()
+ .iter()
+ .find(|t| t.is_custom_build())
+ .map(|t| Unit {
+ pkg: unit.pkg,
+ target: t,
+ profile: &cx.profiles.custom_build,
+ kind: unit.kind,
+ })
+}
+
+fn lib_or_check_profile<'a, 'cfg>(
+ unit: &Unit,
+ target: &Target,
+ cx: &Context<'a, 'cfg>,
+) -> &'a Profile {
+ if !target.is_custom_build() && !target.for_host()
+ && (unit.profile.check || (unit.profile.doc && !unit.profile.test))
+ {
+ return &cx.profiles.check;
+ }
+ cx.lib_profile()
+}
--- /dev/null
+use std::collections::{BTreeSet, HashSet};
+use std::collections::hash_map::{Entry, HashMap};
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::{Arc, Mutex};
+
+use core::PackageId;
+use util::{Cfg, Freshness};
+use util::errors::{CargoResult, CargoResultExt};
+use util::{self, internal, paths, profile};
+use util::machine_message;
+
+use super::job::Work;
+use super::{fingerprint, Context, Kind, Unit};
+
+/// Contains the parsed output of a custom build script.
+#[derive(Clone, Debug, Hash)]
+pub struct BuildOutput {
+ /// Paths to pass to rustc with the `-L` flag
+ pub library_paths: Vec<PathBuf>,
+ /// Names and link kinds of libraries, suitable for the `-l` flag
+ pub library_links: Vec<String>,
+ /// Various `--cfg` flags to pass to the compiler
+ pub cfgs: Vec<String>,
+ /// Additional environment variables to run the compiler with.
+ pub env: Vec<(String, String)>,
+ /// Metadata to pass to the immediate dependencies
+ pub metadata: Vec<(String, String)>,
+ /// Paths to trigger a rerun of this build script.
+ pub rerun_if_changed: Vec<PathBuf>,
+ /// Environment variables which, when changed, will cause a rebuild.
+ pub rerun_if_env_changed: Vec<String>,
+ /// Warnings generated by this build.
+ pub warnings: Vec<String>,
+}
+
+/// Map of packages to build info
+pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
+
+/// Build info and overrides
+pub struct BuildState {
+ pub outputs: Mutex<BuildMap>,
+ overrides: HashMap<(String, Kind), BuildOutput>,
+}
+
+#[derive(Default)]
+pub struct BuildScripts {
+ // Cargo will use this `to_link` vector to add -L flags to compiles as we
+ // propagate them upwards towards the final build. Note, however, that we
+ // need to preserve the ordering of `to_link` to be topologically sorted.
+ // This will ensure that build scripts which print their paths properly will
+ // correctly pick up the files they generated (if there are duplicates
+ // elsewhere).
+ //
+ // To preserve this ordering, the (id, kind) is stored in two places, once
+ // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
+ // this as we're building incrementally below to ensure that the memory
+ // usage here doesn't blow up too much.
+ //
+ // For more information, see #2354
+ pub to_link: Vec<(PackageId, Kind)>,
+ seen_to_link: HashSet<(PackageId, Kind)>,
+ pub plugins: BTreeSet<PackageId>,
+}
+
+pub struct BuildDeps {
+ pub build_script_output: PathBuf,
+ pub rerun_if_changed: Vec<PathBuf>,
+ pub rerun_if_env_changed: Vec<String>,
+}
+
+/// Prepares a `Work` that executes the target as a custom build script.
+///
+/// The `req` given is the requirement which this run of the build script will
+/// prepare work for. If the requirement is specified as both the target and the
+/// host platforms it is assumed that the two are equal and the build script is
+/// only run once (not twice).
+pub fn prepare<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<(Work, Work, Freshness)> {
+ let _p = profile::start(format!(
+ "build script prepare: {}/{}",
+ unit.pkg,
+ unit.target.name()
+ ));
+
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+ let overridden = cx.build_script_overridden.contains(&key);
+ let (work_dirty, work_fresh) = if overridden {
+ (Work::noop(), Work::noop())
+ } else {
+ build_work(cx, unit)?
+ };
+
+ // Now that we've prep'd our work, build the work needed to manage the
+ // fingerprint and then start returning that upwards.
+ let (freshness, dirty, fresh) = fingerprint::prepare_build_cmd(cx, unit)?;
+
+ Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
+}
+
+fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> {
+ assert!(unit.profile.run_custom_build);
+ let dependencies = cx.dep_targets(unit);
+ let build_script_unit = dependencies
+ .iter()
+ .find(|d| !d.profile.run_custom_build && d.target.is_custom_build())
+ .expect("running a script not depending on an actual script");
+ let script_output = cx.files().build_script_dir(build_script_unit);
+ let build_output = cx.files().build_script_out_dir(unit);
+
+ // Building the command to execute
+ let to_exec = script_output.join(unit.target.name());
+
+ // Start preparing the process to execute, starting out with some
+ // environment variables. Note that the profile-related environment
+ // variables are not set with the build script's profile but rather the
+ // package's library profile.
+ let profile = cx.lib_profile();
+ let to_exec = to_exec.into_os_string();
+ let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?;
+ cmd.env("OUT_DIR", &build_output)
+ .env("CARGO_MANIFEST_DIR", unit.pkg.root())
+ .env("NUM_JOBS", &cx.jobs().to_string())
+ .env(
+ "TARGET",
+ &match unit.kind {
+ Kind::Host => cx.host_triple(),
+ Kind::Target => cx.target_triple(),
+ },
+ )
+ .env("DEBUG", &profile.debuginfo.is_some().to_string())
+ .env("OPT_LEVEL", &profile.opt_level)
+ .env(
+ "PROFILE",
+ if cx.build_config.release {
+ "release"
+ } else {
+ "debug"
+ },
+ )
+ .env("HOST", cx.host_triple())
+ .env("RUSTC", &cx.config.rustc()?.path)
+ .env("RUSTDOC", &*cx.config.rustdoc()?)
+ .inherit_jobserver(&cx.jobserver);
+
+ if let Some(links) = unit.pkg.manifest().links() {
+ cmd.env("CARGO_MANIFEST_LINKS", links);
+ }
+
+ // Be sure to pass along all enabled features for this package; this is the
+ // last piece of statically known information that we have.
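+ //
+ // For example (illustrative), an enabled feature `serde-derive` becomes
+ // `CARGO_FEATURE_SERDE_DERIVE=1` in the build script's environment.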
+ for feat in cx.resolve.features(unit.pkg.package_id()).iter() {
+ cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
+ }
+
+ let mut cfg_map = HashMap::new();
+ for cfg in cx.cfg(unit.kind) {
+ match *cfg {
+ Cfg::Name(ref n) => {
+ cfg_map.insert(n.clone(), None);
+ }
+ Cfg::KeyPair(ref k, ref v) => {
+ if let Some(ref mut values) =
+ *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new()))
+ {
+ values.push(v.clone())
+ }
+ }
+ }
+ }
+ for (k, v) in cfg_map {
+ let k = format!("CARGO_CFG_{}", super::envify(&k));
+ match v {
+ Some(list) => {
+ cmd.env(&k, list.join(","));
+ }
+ None => {
+ cmd.env(&k, "");
+ }
+ }
+ }
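+
+ // For example (illustrative), `cfg(target_os = "linux")` surfaces as
+ // `CARGO_CFG_TARGET_OS=linux`, while a multi-valued cfg such as
+ // `target_feature` is comma-joined: `CARGO_CFG_TARGET_FEATURE=sse2,ssse3`.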
+
+ // Gather the set of native dependencies that this package has along with
+ // some other variables to close over.
+ //
+ // This information will be used at build-time later on to figure out which
+ // sorts of variables need to be discovered at that time.
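+ //
+ // For example (an illustrative sketch), if a dependency declares
+ // `links = "z"` and its build script printed `cargo:include=/usr/include`,
+ // this build script will see `DEP_Z_INCLUDE=/usr/include` in its
+ // environment via the `DEP_` loop in the dirty work below.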
+ let lib_deps = {
+ dependencies
+ .iter()
+ .filter_map(|unit| {
+ if unit.profile.run_custom_build {
+ Some((
+ unit.pkg.manifest().links().unwrap().to_string(),
+ unit.pkg.package_id().clone(),
+ ))
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>()
+ };
+ let pkg_name = unit.pkg.to_string();
+ let build_state = Arc::clone(&cx.build_state);
+ let id = unit.pkg.package_id().clone();
+ let (output_file, err_file, root_output_file) = {
+ let build_output_parent = build_output.parent().unwrap();
+ let output_file = build_output_parent.join("output");
+ let err_file = build_output_parent.join("stderr");
+ let root_output_file = build_output_parent.join("root-output");
+ (output_file, err_file, root_output_file)
+ };
+ let root_output = cx.files().target_root().to_path_buf();
+ let all = (
+ id.clone(),
+ pkg_name.clone(),
+ Arc::clone(&build_state),
+ output_file.clone(),
+ root_output.clone(),
+ );
+ let build_scripts = super::load_build_deps(cx, unit);
+ let kind = unit.kind;
+ let json_messages = cx.build_config.json_messages;
+
+ // Check to see if the build script has already run, and if it has, keep
+ // track of whether it has told us about some explicit dependencies.
+ let prev_root_output = paths::read_bytes(&root_output_file)
+ .and_then(|bytes| util::bytes2path(&bytes))
+ .unwrap_or_else(|_| cmd.get_cwd().unwrap().to_path_buf());
+ let prev_output =
+ BuildOutput::parse_file(&output_file, &pkg_name, &prev_root_output, &root_output).ok();
+ let deps = BuildDeps::new(&output_file, prev_output.as_ref());
+ cx.build_explicit_deps.insert(*unit, deps);
+
+ fs::create_dir_all(&script_output)?;
+ fs::create_dir_all(&build_output)?;
+
+ // Prepare the unit of "dirty work" which will actually run the custom build
+ // command.
+ //
+ // Note that this has to do some extra work just before running the command
+ // to determine extra environment variables and such.
+ let dirty = Work::new(move |state| {
+ // Make sure that OUT_DIR exists.
+ //
+ // If we have an old build directory, then just move it into place,
+ // otherwise create it!
+ if fs::metadata(&build_output).is_err() {
+ fs::create_dir(&build_output).chain_err(|| {
+ internal(
+ "failed to create script output directory for \
+ build command",
+ )
+ })?;
+ }
+
+ // For all our native lib dependencies, pick up their metadata to pass
+ // along to this custom build command. We're also careful to augment our
+ // dynamic library search path in case the build script depended on any
+ // native dynamic libraries.
+ {
+ let build_state = build_state.outputs.lock().unwrap();
+ for (name, id) in lib_deps {
+ let key = (id.clone(), kind);
+ let state = build_state.get(&key).ok_or_else(|| {
+ internal(format!(
+ "failed to locate build state for env \
+ vars: {}/{:?}",
+ id, kind
+ ))
+ })?;
+ let data = &state.metadata;
+ for &(ref key, ref value) in data.iter() {
+ cmd.env(
+ &format!("DEP_{}_{}", super::envify(&name), super::envify(key)),
+ value,
+ );
+ }
+ }
+ if let Some(build_scripts) = build_scripts {
+ super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &root_output)?;
+ }
+ }
+
+ // And now finally, run the build command itself!
+ state.running(&cmd);
+ let output = cmd.exec_with_streaming(
+ &mut |out_line| {
+ state.stdout(out_line);
+ Ok(())
+ },
+ &mut |err_line| {
+ state.stderr(err_line);
+ Ok(())
+ },
+ true,
+ ).map_err(|e| {
+ format_err!(
+ "failed to run custom build command for `{}`\n{}",
+ pkg_name,
+ e
+ )
+ })?;
+
+ // After the build command has finished running, we need to be sure to
+ // remember all of its output so we can later discover precisely what it
+ // was, even if we don't run the build command again (due to freshness).
+ //
+ // This is also the location where we provide feedback into the build
+ // state informing what variables were discovered via our script as
+ // well.
+ paths::write(&output_file, &output.stdout)?;
+ paths::write(&err_file, &output.stderr)?;
+ paths::write(&root_output_file, util::path2bytes(&root_output)?)?;
+ let parsed_output =
+ BuildOutput::parse(&output.stdout, &pkg_name, &root_output, &root_output)?;
+
+ if json_messages {
+ let library_paths = parsed_output
+ .library_paths
+ .iter()
+ .map(|l| l.display().to_string())
+ .collect::<Vec<_>>();
+ machine_message::emit(&machine_message::BuildScript {
+ package_id: &id,
+ linked_libs: &parsed_output.library_links,
+ linked_paths: &library_paths,
+ cfgs: &parsed_output.cfgs,
+ env: &parsed_output.env,
+ });
+ }
+
+ build_state.insert(id, kind, parsed_output);
+ Ok(())
+ });
+
+ // Now that we've prepared our work-to-do, we need to prepare the fresh work
+ // itself to run when we actually end up just discarding what we calculated
+ // above.
+ let fresh = Work::new(move |_tx| {
+ let (id, pkg_name, build_state, output_file, root_output) = all;
+ let output = match prev_output {
+ Some(output) => output,
+ None => {
+ BuildOutput::parse_file(&output_file, &pkg_name, &prev_root_output, &root_output)?
+ }
+ };
+ build_state.insert(id, kind, output);
+ Ok(())
+ });
+
+ Ok((dirty, fresh))
+}
+
+impl BuildState {
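+ /// Builds the initial `BuildState`, seeding `overrides` from configuration.
+ /// For example (illustrative), a `[target.<triple>.<links-name>]` table in
+ /// `.cargo/config` can pre-supply `rustc-link-search`/`rustc-link-lib`
+ /// values so the real build script for that `links` key never runs.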
+ pub fn new(config: &super::BuildConfig) -> BuildState {
+ let mut overrides = HashMap::new();
+ let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
+ let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
+ for ((name, output), kind) in i1.chain(i2) {
+ overrides.insert((name.clone(), kind), output.clone());
+ }
+ BuildState {
+ outputs: Mutex::new(HashMap::new()),
+ overrides,
+ }
+ }
+
+ fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
+ self.outputs.lock().unwrap().insert((id, kind), output);
+ }
+}
+
+impl BuildOutput {
+ pub fn parse_file(
+ path: &Path,
+ pkg_name: &str,
+ root_output_when_generated: &Path,
+ root_output: &Path,
+ ) -> CargoResult<BuildOutput> {
+ let contents = paths::read_bytes(path)?;
+ BuildOutput::parse(&contents, pkg_name, root_output_when_generated, root_output)
+ }
+
+ /// Parses the output of a script.
+ /// The `pkg_name` is used for error messages.
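+ ///
+ /// For example (an illustrative sketch), given this build script stdout:
+ ///
+ /// ```text
+ /// cargo:rustc-link-lib=static=foo
+ /// cargo:rustc-cfg=has_foo
+ /// cargo:root=/some/path
+ /// ```
+ ///
+ /// `library_links` gains `"static=foo"`, `cfgs` gains `"has_foo"`, and the
+ /// unrecognized `root` key is stored in `metadata` as
+ /// `("root", "/some/path")`.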
+ pub fn parse(
+ input: &[u8],
+ pkg_name: &str,
+ root_output_when_generated: &Path,
+ root_output: &Path,
+ ) -> CargoResult<BuildOutput> {
+ let mut library_paths = Vec::new();
+ let mut library_links = Vec::new();
+ let mut cfgs = Vec::new();
+ let mut env = Vec::new();
+ let mut metadata = Vec::new();
+ let mut rerun_if_changed = Vec::new();
+ let mut rerun_if_env_changed = Vec::new();
+ let mut warnings = Vec::new();
+ let whence = format!("build script of `{}`", pkg_name);
+
+ for line in input.split(|b| *b == b'\n') {
+ let line = match str::from_utf8(line) {
+ Ok(line) => line.trim(),
+ Err(..) => continue,
+ };
+ let mut iter = line.splitn(2, ':');
+ if iter.next() != Some("cargo") {
+ // skip this line since it doesn't start with "cargo:"
+ continue;
+ }
+ let data = match iter.next() {
+ Some(val) => val,
+ None => continue,
+ };
+
+ // getting the `key=value` part of the line
+ let mut iter = data.splitn(2, '=');
+ let key = iter.next();
+ let value = iter.next();
+ let (key, value) = match (key, value) {
+ (Some(a), Some(b)) => (a, b.trim_right()),
+ // line started with `cargo:` but didn't match `key=value`
+ _ => bail!("Wrong output in {}: `{}`", whence, line),
+ };
+
+ let path = |val: &str| match Path::new(val).strip_prefix(root_output_when_generated) {
+ Ok(path) => root_output.join(path),
+ Err(_) => PathBuf::from(val),
+ };
+
+ match key {
+ "rustc-flags" => {
+ let (paths, links) = BuildOutput::parse_rustc_flags(value, &whence)?;
+ library_links.extend(links.into_iter());
+ library_paths.extend(paths.into_iter());
+ }
+ "rustc-link-lib" => library_links.push(value.to_string()),
+ "rustc-link-search" => library_paths.push(path(value)),
+ "rustc-cfg" => cfgs.push(value.to_string()),
+ "rustc-env" => env.push(BuildOutput::parse_rustc_env(value, &whence)?),
+ "warning" => warnings.push(value.to_string()),
+ "rerun-if-changed" => rerun_if_changed.push(path(value)),
+ "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()),
+ _ => metadata.push((key.to_string(), value.to_string())),
+ }
+ }
+
+ Ok(BuildOutput {
+ library_paths,
+ library_links,
+ cfgs,
+ env,
+ metadata,
+ rerun_if_changed,
+ rerun_if_env_changed,
+ warnings,
+ })
+ }
+
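+ /// Parses `-l`/`-L` flags out of a `cargo:rustc-flags` directive.
+ ///
+ /// For example (illustrative), `-L /opt/foo/lib -l foo` yields one library
+ /// path (`/opt/foo/lib`) and one library link (`foo`); any other flag is
+ /// rejected with an error.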
+ pub fn parse_rustc_flags(
+ value: &str,
+ whence: &str,
+ ) -> CargoResult<(Vec<PathBuf>, Vec<String>)> {
+ let value = value.trim();
+ let mut flags_iter = value
+ .split(|c: char| c.is_whitespace())
+ .filter(|w| w.chars().any(|c| !c.is_whitespace()));
+ let (mut library_paths, mut library_links) = (Vec::new(), Vec::new());
+ while let Some(flag) = flags_iter.next() {
+ if flag != "-l" && flag != "-L" {
+ bail!(
+ "Only `-l` and `-L` flags are allowed in {}: `{}`",
+ whence,
+ value
+ )
+ }
+ let value = match flags_iter.next() {
+ Some(v) => v,
+ None => bail!(
+ "Flag in rustc-flags has no value in {}: `{}`",
+ whence,
+ value
+ ),
+ };
+ match flag {
+ "-l" => library_links.push(value.to_string()),
+ "-L" => library_paths.push(PathBuf::from(value)),
+
+ // was already checked above
+ _ => bail!("only -l and -L flags are allowed"),
+ };
+ }
+ Ok((library_paths, library_links))
+ }
+
+ pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> {
+ let mut iter = value.splitn(2, '=');
+ let name = iter.next();
+ let val = iter.next();
+ match (name, val) {
+ (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())),
+ _ => bail!("Variable rustc-env has no value in {}: {}", whence, value),
+ }
+ }
+}
+
+impl BuildDeps {
+ pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps {
+ BuildDeps {
+ build_script_output: output_file.to_path_buf(),
+ rerun_if_changed: output
+ .map(|p| &p.rerun_if_changed)
+ .cloned()
+ .unwrap_or_default(),
+ rerun_if_env_changed: output
+ .map(|p| &p.rerun_if_env_changed)
+ .cloned()
+ .unwrap_or_default(),
+ }
+ }
+}
+
+/// Compute the `build_scripts` map in the `Context` which tracks what build
+/// scripts each package depends on.
+///
+/// The global `build_scripts` map lists for all (package, kind) tuples what set
+/// of packages' build script outputs must be considered. For example this lists
+/// all dependencies' `-L` flags which need to be propagated transitively.
+///
+/// The set of units given to this function is the initial set of
+/// targets/profiles which are being built.
+pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> {
+ let mut ret = HashMap::new();
+ for unit in units {
+ build(&mut ret, cx, unit)?;
+ }
+ cx.build_scripts
+ .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v))));
+ return Ok(());
+
+ // Recursive function to build up the map we're constructing. This function
+ // memoizes all of its return values as it goes along.
+ fn build<'a, 'b, 'cfg>(
+ out: &'a mut HashMap<Unit<'b>, BuildScripts>,
+ cx: &mut Context<'b, 'cfg>,
+ unit: &Unit<'b>,
+ ) -> CargoResult<&'a BuildScripts> {
+ // Do a quick pre-flight check to see if we've already calculated the
+ // set of dependencies.
+ if out.contains_key(unit) {
+ return Ok(&out[unit]);
+ }
+
+ {
+ let key = unit.pkg
+ .manifest()
+ .links()
+ .map(|l| (l.to_string(), unit.kind));
+ let build_state = &cx.build_state;
+ if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) {
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+ cx.build_script_overridden.insert(key.clone());
+ build_state
+ .outputs
+ .lock()
+ .unwrap()
+ .insert(key, output.clone());
+ }
+ }
+
+ let mut ret = BuildScripts::default();
+
+ if !unit.target.is_custom_build() && unit.pkg.has_custom_build() {
+ add_to_link(&mut ret, unit.pkg.package_id(), unit.kind);
+ }
+
+ // We want to invoke the compiler deterministically to be cache-friendly
+ // to rustc invocation caching schemes, so be sure to generate the same
+ // set of build script dependency orderings via sorting the targets that
+ // come out of the `Context`.
+ let mut targets = cx.dep_targets(unit);
+ targets.sort_by_key(|u| u.pkg.package_id());
+
+ for unit in targets.iter() {
+ let dep_scripts = build(out, cx, unit)?;
+
+ if unit.target.for_host() {
+ ret.plugins
+ .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned());
+ } else if unit.target.linkable() {
+ for &(ref pkg, kind) in dep_scripts.to_link.iter() {
+ add_to_link(&mut ret, pkg, kind);
+ }
+ }
+ }
+
+ match out.entry(*unit) {
+ Entry::Vacant(entry) => Ok(entry.insert(ret)),
+ Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"),
+ }
+ }
+
+ // When adding an entry to 'to_link' we only actually push it on if the
+ // script hasn't seen it yet (e.g. we don't push on duplicates).
+ fn add_to_link(scripts: &mut BuildScripts, pkg: &PackageId, kind: Kind) {
+ if scripts.seen_to_link.insert((pkg.clone(), kind)) {
+ scripts.to_link.push((pkg.clone(), kind));
+ }
+ }
+}
--- /dev/null
+use std::env;
+use std::fs;
+use std::hash::{self, Hasher};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+
+use filetime::FileTime;
+use serde::ser::{self, Serialize};
+use serde::de::{self, Deserialize};
+use serde_json;
+
+use core::{Edition, Package, TargetKind};
+use util;
+use util::{internal, profile, Dirty, Fresh, Freshness};
+use util::errors::{CargoResult, CargoResultExt};
+use util::paths;
+
+use super::job::Work;
+use super::context::{Context, FileFlavor, Unit};
+use super::custom_build::BuildDeps;
+
+/// A tuple result of the `prepare_foo` functions in this module.
+///
+/// The first element of the triple is whether the target in question is
+/// currently fresh or not, and the second two elements are work to perform when
+/// the target is dirty or fresh, respectively.
+///
+/// Both units of work are always generated because a fresh package may still be
+/// rebuilt if some upstream dependency changes.
+pub type Preparation = (Freshness, Work, Work);
+
+/// Prepare the necessary work for the fingerprint for a specific target.
+///
+/// When dealing with fingerprints, cargo gets to choose what granularity
+/// "freshness" is considered at. One option is considering freshness at the
+/// package level. This means that if anything in a package changes, the entire
+/// package is rebuilt, unconditionally. This simplicity comes at a cost,
+/// however, in that test-only changes will cause libraries to be rebuilt, which
+/// is quite unfortunate!
+///
+/// The cost was deemed high enough that fingerprints are now calculated at the
+/// layer of a target rather than a package. Each target can then be kept track
+/// of separately and only rebuilt as necessary. This requires cargo to
+/// understand what the inputs are to a target, so we drive rustc with the
+/// `--emit=dep-info` flag to learn about all input files to a unit of compilation.
+///
+/// This function will calculate the fingerprint for a target and prepare the
+/// work necessary to either write the fingerprint or copy over all fresh files
+/// from the old directories to their new locations.
+pub fn prepare_target<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<Preparation> {
+ let _p = profile::start(format!(
+ "fingerprint: {} / {}",
+ unit.pkg.package_id(),
+ unit.target.name()
+ ));
+ let new = cx.files().fingerprint_dir(unit);
+ let loc = new.join(&filename(cx, unit));
+
+ debug!("fingerprint at: {}", loc.display());
+
+ let fingerprint = calculate(cx, unit)?;
+ let compare = compare_old_fingerprint(&loc, &*fingerprint);
+ log_compare(unit, &compare);
+
+ // If our comparison failed (e.g. we're going to trigger a rebuild of this
+ // crate), then we also ensure the source of the crate passes all
+ // verification checks before we build it.
+ //
+ // The `Source::verify` method is intended to allow sources to execute
+ // pre-build checks to ensure that the relevant source code is all
+ // up-to-date and as expected. This is currently used primarily for
+ // directory sources which will use this hook to perform an integrity check
+ // on all files in the source to ensure they haven't changed. If they have
+ // changed then an error is issued.
+ if compare.is_err() {
+ let source_id = unit.pkg.package_id().source_id();
+ let sources = cx.packages.sources();
+ let source = sources
+ .get(source_id)
+ .ok_or_else(|| internal("missing package source"))?;
+ source.verify(unit.pkg.package_id())?;
+ }
+
+ let root = cx.files().out_dir(unit);
+ let mut missing_outputs = false;
+ if unit.profile.doc {
+ missing_outputs = !root.join(unit.target.crate_name())
+ .join("index.html")
+ .exists();
+ } else {
+ for output in cx.outputs(unit)?.iter() {
+ if output.flavor == FileFlavor::DebugInfo {
+ continue;
+ }
+ missing_outputs |= !output.path.exists();
+ if let Some(ref link_dst) = output.hardlink {
+ missing_outputs |= !link_dst.exists();
+ }
+ }
+ }
+
+ let allow_failure = unit.profile.rustc_args.is_some();
+ let target_root = cx.files().target_root().to_path_buf();
+ let write_fingerprint = Work::new(move |_| {
+ match fingerprint.update_local(&target_root) {
+ Ok(()) => {}
+ Err(..) if allow_failure => return Ok(()),
+ Err(e) => return Err(e),
+ }
+ write_fingerprint(&loc, &*fingerprint)
+ });
+
+ let fresh = compare.is_ok() && !missing_outputs;
+ Ok((
+ if fresh { Fresh } else { Dirty },
+ write_fingerprint,
+ Work::noop(),
+ ))
+}
+
+/// A fingerprint can be considered to be a "short string" representing the
+/// state of a world for a package.
+///
+/// If a fingerprint ever changes, then the package itself needs to be
+/// recompiled. Inputs to the fingerprint include source code modifications,
+/// compiler flags, compiler version, etc. This structure is not simply a
+/// `String` due to the fact that some fingerprints cannot be calculated lazily.
+///
+/// Path sources, for example, use the mtime of the corresponding dep-info file
+/// as a fingerprint (all source files must be modified *before* this mtime).
+/// This dep-info file is not generated, however, until after the crate is
+/// compiled. As a result, this structure can be thought of as a fingerprint
+/// to-be. The actual value can be calculated via `hash()`, but the operation
+/// may fail as some files may not have been generated.
+///
+/// Note that dependencies are taken into account for fingerprints because rustc
+/// requires that whenever an upstream crate is recompiled that all downstream
+/// dependants are also recompiled. This is typically tracked through
+/// `DependencyQueue`, but it also needs to be retained here because Cargo can
+/// be interrupted while executing, losing the state of the `DependencyQueue`
+/// graph.
+#[derive(Serialize, Deserialize)]
+pub struct Fingerprint {
+ rustc: u64,
+ features: String,
+ target: u64,
+ profile: u64,
+ path: u64,
+ #[serde(serialize_with = "serialize_deps", deserialize_with = "deserialize_deps")]
+ deps: Vec<(String, Arc<Fingerprint>)>,
+ local: Vec<LocalFingerprint>,
+ #[serde(skip_serializing, skip_deserializing)]
+ memoized_hash: Mutex<Option<u64>>,
+ rustflags: Vec<String>,
+ edition: Edition,
+}
+
+fn serialize_deps<S>(deps: &[(String, Arc<Fingerprint>)], ser: S) -> Result<S::Ok, S::Error>
+where
+ S: ser::Serializer,
+{
+ deps.iter()
+ .map(|&(ref a, ref b)| (a, b.hash()))
+ .collect::<Vec<_>>()
+ .serialize(ser)
+}
+
+fn deserialize_deps<'de, D>(d: D) -> Result<Vec<(String, Arc<Fingerprint>)>, D::Error>
+where
+ D: de::Deserializer<'de>,
+{
+ let decoded = <Vec<(String, u64)>>::deserialize(d)?;
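+ // Dependency edges are serialized as (name, hash) pairs, so on the way back
+ // in we rebuild placeholder `Fingerprint`s that carry only the memoized
+ // hash; the other fields are dummies that comparisons never consult.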
+ Ok(decoded
+ .into_iter()
+ .map(|(name, hash)| {
+ (
+ name,
+ Arc::new(Fingerprint {
+ rustc: 0,
+ target: 0,
+ profile: 0,
+ path: 0,
+ local: vec![LocalFingerprint::Precalculated(String::new())],
+ features: String::new(),
+ deps: Vec::new(),
+ memoized_hash: Mutex::new(Some(hash)),
+ edition: Edition::Edition2015,
+ rustflags: Vec::new(),
+ }),
+ )
+ })
+ .collect())
+}
+
+#[derive(Serialize, Deserialize, Hash)]
+enum LocalFingerprint {
+ Precalculated(String),
+ MtimeBased(MtimeSlot, PathBuf),
+ EnvBased(String, Option<String>),
+}
+
+impl LocalFingerprint {
+ fn mtime(root: &Path, mtime: Option<FileTime>, path: &Path) -> LocalFingerprint {
+ let mtime = MtimeSlot(Mutex::new(mtime));
+ assert!(path.is_absolute());
+ let path = path.strip_prefix(root).unwrap_or(path);
+ LocalFingerprint::MtimeBased(mtime, path.to_path_buf())
+ }
+}
+
+struct MtimeSlot(Mutex<Option<FileTime>>);
+
+impl Fingerprint {
+ fn update_local(&self, root: &Path) -> CargoResult<()> {
+ let mut hash_busted = false;
+ for local in self.local.iter() {
+ match *local {
+ LocalFingerprint::MtimeBased(ref slot, ref path) => {
+ let path = root.join(path);
+ let meta = fs::metadata(&path)
+ .chain_err(|| internal(format!("failed to stat `{}`", path.display())))?;
+ let mtime = FileTime::from_last_modification_time(&meta);
+ *slot.0.lock().unwrap() = Some(mtime);
+ }
+ LocalFingerprint::EnvBased(..) | LocalFingerprint::Precalculated(..) => continue,
+ }
+ hash_busted = true;
+ }
+
+ if hash_busted {
+ *self.memoized_hash.lock().unwrap() = None;
+ }
+ Ok(())
+ }
+
+ fn hash(&self) -> u64 {
+ if let Some(s) = *self.memoized_hash.lock().unwrap() {
+ return s;
+ }
+ let ret = util::hash_u64(self);
+ *self.memoized_hash.lock().unwrap() = Some(ret);
+ ret
+ }
+
+ fn compare(&self, old: &Fingerprint) -> CargoResult<()> {
+ if self.rustc != old.rustc {
+ bail!("rust compiler has changed")
+ }
+ if self.features != old.features {
+ bail!(
+ "features have changed: {} != {}",
+ self.features,
+ old.features
+ )
+ }
+ if self.target != old.target {
+ bail!("target configuration has changed")
+ }
+ if self.path != old.path {
+ bail!("path to the compiler has changed")
+ }
+ if self.profile != old.profile {
+ bail!("profile configuration has changed")
+ }
+ if self.rustflags != old.rustflags {
+ bail!("RUSTFLAGS has changed")
+ }
+ if self.local.len() != old.local.len() {
+ bail!("local lens changed");
+ }
+ if self.edition != old.edition {
+ bail!("edition changed")
+ }
+ for (new, old) in self.local.iter().zip(&old.local) {
+ match (new, old) {
+ (
+ &LocalFingerprint::Precalculated(ref a),
+ &LocalFingerprint::Precalculated(ref b),
+ ) => {
+ if a != b {
+ bail!("precalculated components have changed: {} != {}", a, b)
+ }
+ }
+ (
+ &LocalFingerprint::MtimeBased(ref on_disk_mtime, ref ap),
+ &LocalFingerprint::MtimeBased(ref previously_built_mtime, ref bp),
+ ) => {
+ let on_disk_mtime = on_disk_mtime.0.lock().unwrap();
+ let previously_built_mtime = previously_built_mtime.0.lock().unwrap();
+
+ let should_rebuild = match (*on_disk_mtime, *previously_built_mtime) {
+ (None, None) => false,
+ (Some(_), None) | (None, Some(_)) => true,
+ (Some(on_disk), Some(previously_built)) => on_disk > previously_built,
+ };
+
+ if should_rebuild {
+ bail!(
+ "mtime based components have changed: previously {:?} now {:?}, \
+ paths are {:?} and {:?}",
+ *previously_built_mtime,
+ *on_disk_mtime,
+ ap,
+ bp
+ )
+ }
+ }
+ (
+ &LocalFingerprint::EnvBased(ref akey, ref avalue),
+ &LocalFingerprint::EnvBased(ref bkey, ref bvalue),
+ ) => {
+ if *akey != *bkey {
+ bail!("env vars changed: {} != {}", akey, bkey);
+ }
+ if *avalue != *bvalue {
+ bail!(
+ "env var `{}` changed: previously {:?} now {:?}",
+ akey,
+ bvalue,
+ avalue
+ )
+ }
+ }
+ _ => bail!("local fingerprint type has changed"),
+ }
+ }
+
+ if self.deps.len() != old.deps.len() {
+ bail!("number of dependencies has changed")
+ }
+ for (a, b) in self.deps.iter().zip(old.deps.iter()) {
+ if a.1.hash() != b.1.hash() {
+ bail!("new ({}) != old ({})", a.0, b.0)
+ }
+ }
+ Ok(())
+ }
+}
+
+impl hash::Hash for Fingerprint {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ let Fingerprint {
+ rustc,
+ ref features,
+ target,
+ path,
+ profile,
+ ref deps,
+ ref local,
+ edition,
+ ref rustflags,
+ ..
+ } = *self;
+ (
+ rustc,
+ features,
+ target,
+ path,
+ profile,
+ local,
+ edition,
+ rustflags,
+ ).hash(h);
+
+ h.write_usize(deps.len());
+ for &(ref name, ref fingerprint) in deps {
+ name.hash(h);
+ // use memoized dep hashes to avoid exponential blowup
+ h.write_u64(Fingerprint::hash(fingerprint));
+ }
+ }
+}
+
+impl hash::Hash for MtimeSlot {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ self.0.lock().unwrap().hash(h)
+ }
+}
+
+impl ser::Serialize for MtimeSlot {
+ fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+ where
+ S: ser::Serializer,
+ {
+ self.0
+ .lock()
+ .unwrap()
+ .map(|ft| (ft.seconds_relative_to_1970(), ft.nanoseconds()))
+ .serialize(s)
+ }
+}
+
+impl<'de> de::Deserialize<'de> for MtimeSlot {
+ fn deserialize<D>(d: D) -> Result<MtimeSlot, D::Error>
+ where
+ D: de::Deserializer<'de>,
+ {
+ let kind: Option<(u64, u32)> = de::Deserialize::deserialize(d)?;
+ Ok(MtimeSlot(Mutex::new(kind.map(|(s, n)| {
+ FileTime::from_seconds_since_1970(s, n)
+ }))))
+ }
+}
+
+/// Calculates the fingerprint for a package/target pair.
+///
+/// This fingerprint is used by Cargo to detect when information such as the following changes:
+///
+/// * A non-path package changes (changes version, changes revision, etc.).
+/// * Any dependency changes
+/// * The compiler changes
+/// * The set of features a package is built with changes
+/// * The profile a target is compiled with changes (e.g. opt-level changes)
+///
+/// Information like file modification time is only calculated for path
+/// dependencies and is calculated in `calculate_target_fresh`.
+fn calculate<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<Arc<Fingerprint>> {
+ if let Some(s) = cx.fingerprints.get(unit) {
+ return Ok(Arc::clone(s));
+ }
+
+ // Next, recursively calculate the fingerprint for all of our dependencies.
+ //
+ // Skip the fingerprints of build scripts as they may not always be
+ // available and the dirtiness propagation for modification is tracked
+ // elsewhere. Also skip fingerprints of binaries because they don't actually
+ // induce a recompile; they're just dependencies in the sense that they need
+ // to be built.
+ let deps = cx.dep_targets(unit);
+ let deps = deps.iter()
+ .filter(|u| !u.target.is_custom_build() && !u.target.is_bin())
+ .map(|unit| {
+ calculate(cx, unit).map(|fingerprint| (unit.pkg.package_id().to_string(), fingerprint))
+ })
+ .collect::<CargoResult<Vec<_>>>()?;
+
+ // And finally, calculate what our own local fingerprint is
+ let local = if use_dep_info(unit) {
+ let dep_info = dep_info_loc(cx, unit);
+ let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?;
+ LocalFingerprint::mtime(cx.files().target_root(), mtime, &dep_info)
+ } else {
+ let fingerprint = pkg_fingerprint(cx, unit.pkg)?;
+ LocalFingerprint::Precalculated(fingerprint)
+ };
+ let mut deps = deps;
+ deps.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
+ let extra_flags = if unit.profile.doc {
+ cx.rustdocflags_args(unit)?
+ } else {
+ cx.rustflags_args(unit)?
+ };
+ let fingerprint = Arc::new(Fingerprint {
+ rustc: util::hash_u64(&cx.config.rustc()?.verbose_version),
+ target: util::hash_u64(&unit.target),
+ profile: util::hash_u64(&(&unit.profile, cx.incremental_args(unit)?)),
+ // Note that .0 is hashed here, not .1 which is the cwd. That doesn't
+ // actually affect the output artifact so there's no need to hash it.
+ path: util::hash_u64(&super::path_args(cx, unit).0),
+ features: format!("{:?}", cx.resolve.features_sorted(unit.pkg.package_id())),
+ deps,
+ local: vec![local],
+ memoized_hash: Mutex::new(None),
+ edition: unit.pkg.manifest().edition(),
+ rustflags: extra_flags,
+ });
+ cx.fingerprints.insert(*unit, Arc::clone(&fingerprint));
+ Ok(fingerprint)
+}
+
+// We want to use the mtime for files if we're a path source, but if we're a
+// git/registry source, then the mtime of files may fluctuate, but they won't
+// change so long as the source itself remains constant (which is the
+// responsibility of the source)
+fn use_dep_info(unit: &Unit) -> bool {
+ let path = unit.pkg.summary().source_id().is_path();
+ !unit.profile.doc && path
+}
+
+/// Prepare the necessary work for the fingerprint of a build command.
+///
+/// Build commands are located on packages, not on targets. Additionally, we
+/// don't have `--emit=dep-info` to drive calculation of the fingerprint of a build
+/// command. This brings up an interesting predicament which gives us a few
+/// options to figure out whether a build command is dirty or not:
+///
+/// 1. A build command is dirty if *any* file in a package changes. In theory
+/// all files are candidates for being used by the build command.
+/// 2. A build command is dirty if any file in a *specific directory* changes.
+/// This may lose information as it may require files outside of the specific
+/// directory.
+/// 3. A build command must itself provide a dep-info-like file stating how it
+/// should be considered dirty or not.
+///
+/// The currently implemented solution is option (1), although it is planned to
+/// migrate to option (2) in the near future.
+pub fn prepare_build_cmd<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<Preparation> {
+ let _p = profile::start(format!("fingerprint build cmd: {}", unit.pkg.package_id()));
+ let new = cx.files().fingerprint_dir(unit);
+ let loc = new.join("build");
+
+ debug!("fingerprint at: {}", loc.display());
+
+ let (local, output_path) = build_script_local_fingerprints(cx, unit)?;
+ let mut fingerprint = Fingerprint {
+ rustc: 0,
+ target: 0,
+ profile: 0,
+ path: 0,
+ features: String::new(),
+ deps: Vec::new(),
+ local,
+ memoized_hash: Mutex::new(None),
+ edition: Edition::Edition2015,
+ rustflags: Vec::new(),
+ };
+ let compare = compare_old_fingerprint(&loc, &fingerprint);
+ log_compare(unit, &compare);
+
+ // When we write out the fingerprint, we may want to actually change the
+ // kind of fingerprint being recorded. If the previous run of the build
+ // script printed no directives (or the script had never run before), we
+ // use the `Precalculated` variant with the `pkg_fingerprint`. If the build
+ // script then prints `rerun-if-changed`, however, we need to record what's
+ // necessary for that fingerprint.
+ //
+ // Hence, if there were some `rerun-if-changed` directives forcibly change
+ // the kind of fingerprint by reinterpreting the dependencies output by the
+ // build script.
+ let state = Arc::clone(&cx.build_state);
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+ let pkg_root = unit.pkg.root().to_path_buf();
+ let target_root = cx.files().target_root().to_path_buf();
+ let write_fingerprint = Work::new(move |_| {
+ if let Some(output_path) = output_path {
+ let outputs = state.outputs.lock().unwrap();
+ let outputs = &outputs[&key];
+ if !outputs.rerun_if_changed.is_empty() || !outputs.rerun_if_env_changed.is_empty() {
+ let deps = BuildDeps::new(&output_path, Some(outputs));
+ fingerprint.local = local_fingerprints_deps(&deps, &target_root, &pkg_root);
+ fingerprint.update_local(&target_root)?;
+ }
+ }
+ write_fingerprint(&loc, &fingerprint)
+ });
+
+ Ok((
+ if compare.is_ok() { Fresh } else { Dirty },
+ write_fingerprint,
+ Work::noop(),
+ ))
+}
+
+fn build_script_local_fingerprints<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<(Vec<LocalFingerprint>, Option<PathBuf>)> {
+ let state = cx.build_state.outputs.lock().unwrap();
+ // First up, if this build script is entirely overridden, then we just
+ // return the hash of what we overrode it with.
+ //
+ // Note that the `None` here means that we don't want to update the local
+ // fingerprint afterwards because this is all just overridden.
+ if let Some(output) = state.get(&(unit.pkg.package_id().clone(), unit.kind)) {
+ debug!("override local fingerprints deps");
+ let s = format!(
+ "overridden build state with hash: {}",
+ util::hash_u64(output)
+ );
+ return Ok((vec![LocalFingerprint::Precalculated(s)], None));
+ }
+
+ // Next up we look at the previously listed dependencies for the build
+ // script. If there are none then we're in the "old mode" where we just
+ // assume that we're changed if anything in the package changed. The
+ // `Some` here though means that we want to update our local fingerprints
+ // after we're done as running this build script may have created more
+ // dependencies.
+ let deps = &cx.build_explicit_deps[unit];
+ let output = deps.build_script_output.clone();
+ if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
+ debug!("old local fingerprints deps");
+ let s = pkg_fingerprint(cx, unit.pkg)?;
+ return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output)));
+ }
+
+ // Ok so now we're in "new mode" where we can have files listed as
+ // dependencies as well as env vars listed as dependencies. Process them all
+ // here.
+ Ok((
+ local_fingerprints_deps(deps, cx.files().target_root(), unit.pkg.root()),
+ Some(output),
+ ))
+}
+
+fn local_fingerprints_deps(
+ deps: &BuildDeps,
+ target_root: &Path,
+ pkg_root: &Path,
+) -> Vec<LocalFingerprint> {
+ debug!("new local fingerprints deps");
+ let mut local = Vec::new();
+ if !deps.rerun_if_changed.is_empty() {
+ let output = &deps.build_script_output;
+ let deps = deps.rerun_if_changed.iter().map(|p| pkg_root.join(p));
+ let mtime = mtime_if_fresh(output, deps);
+ local.push(LocalFingerprint::mtime(target_root, mtime, output));
+ }
+
+ for var in deps.rerun_if_env_changed.iter() {
+ let val = env::var(var).ok();
+ local.push(LocalFingerprint::EnvBased(var.clone(), val));
+ }
+
+ local
+}
+
+fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> {
+ let hash = fingerprint.hash();
+ debug!("write fingerprint: {}", loc.display());
+ paths::write(loc, util::to_hex(hash).as_bytes())?;
+ paths::write(
+ &loc.with_extension("json"),
+ &serde_json::to_vec(&fingerprint).unwrap(),
+ )?;
+ Ok(())
+}
+
+/// Prepare for work when a package starts to build
+pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<()> {
+ let new1 = cx.files().fingerprint_dir(unit);
+
+ if fs::metadata(&new1).is_err() {
+ fs::create_dir(&new1)?;
+ }
+
+ Ok(())
+}
+
+pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf {
+ cx.files()
+ .fingerprint_dir(unit)
+ .join(&format!("dep-{}", filename(cx, unit)))
+}
+
+fn compare_old_fingerprint(loc: &Path, new_fingerprint: &Fingerprint) -> CargoResult<()> {
+ let old_fingerprint_short = paths::read(loc)?;
+ let new_hash = new_fingerprint.hash();
+
+ if util::to_hex(new_hash) == old_fingerprint_short {
+ return Ok(());
+ }
+
+ let old_fingerprint_json = paths::read(&loc.with_extension("json"))?;
+ let old_fingerprint = serde_json::from_str(&old_fingerprint_json)
+ .chain_err(|| internal("failed to deserialize json"))?;
+ new_fingerprint.compare(&old_fingerprint)
+}
+
+fn log_compare(unit: &Unit, compare: &CargoResult<()>) {
+ let ce = match *compare {
+ Ok(..) => return,
+ Err(ref e) => e,
+ };
+ info!("fingerprint error for {}: {}", unit.pkg, ce);
+
+ for cause in ce.causes().skip(1) {
+ info!(" cause: {}", cause);
+ }
+}
+
+// Parse the dep-info into a list of paths
+pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
+ let data = match paths::read_bytes(dep_info) {
+ Ok(data) => data,
+ Err(_) => return Ok(None),
+ };
+ let paths = data.split(|&x| x == 0)
+ .filter(|x| !x.is_empty())
+ .map(|p| util::bytes2path(p).map(|p| pkg.root().join(p)))
+ .collect::<Result<Vec<_>, _>>()?;
+ if paths.is_empty() {
+ Ok(None)
+ } else {
+ Ok(Some(paths))
+ }
+}
+
+fn dep_info_mtime_if_fresh(pkg: &Package, dep_info: &Path) -> CargoResult<Option<FileTime>> {
+ if let Some(paths) = parse_dep_info(pkg, dep_info)? {
+ Ok(mtime_if_fresh(dep_info, paths.iter()))
+ } else {
+ Ok(None)
+ }
+}
+
+fn pkg_fingerprint(cx: &Context, pkg: &Package) -> CargoResult<String> {
+ let source_id = pkg.package_id().source_id();
+ let sources = cx.packages.sources();
+
+ let source = sources
+ .get(source_id)
+ .ok_or_else(|| internal("missing package source"))?;
+ source.fingerprint(pkg)
+}
+
+fn mtime_if_fresh<I>(output: &Path, paths: I) -> Option<FileTime>
+where
+ I: IntoIterator,
+ I::Item: AsRef<Path>,
+{
+ let meta = match fs::metadata(output) {
+ Ok(meta) => meta,
+ Err(..) => return None,
+ };
+ let mtime = FileTime::from_last_modification_time(&meta);
+
+ let any_stale = paths.into_iter().any(|path| {
+ let path = path.as_ref();
+ let meta = match fs::metadata(path) {
+ Ok(meta) => meta,
+ Err(..) => {
+ info!("stale: {} -- missing", path.display());
+ return true;
+ }
+ };
+ let mtime2 = FileTime::from_last_modification_time(&meta);
+ if mtime2 > mtime {
+ info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime);
+ true
+ } else {
+ false
+ }
+ });
+
+ if any_stale {
+ None
+ } else {
+ Some(mtime)
+ }
+}
+
+fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String {
+ // file_stem includes metadata hash. Thus we have a different
+ // fingerprint for every metadata hash version. This works because
+ // even if the package is fresh, we'll still link the fresh target.
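+ //
+ // For example (illustrative), a library's fingerprint file might be named
+ // `lib-foo-0f3a1c9e6f21a8b3` (hypothetical hash), while the test flavor of
+ // the same target would be `test-lib-foo-0f3a1c9e6f21a8b3`.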
+ let file_stem = cx.files().file_stem(unit);
+ let kind = match *unit.target.kind() {
+ TargetKind::Lib(..) => "lib",
+ TargetKind::Bin => "bin",
+ TargetKind::Test => "integration-test",
+ TargetKind::ExampleBin | TargetKind::ExampleLib(..) => "example",
+ TargetKind::Bench => "bench",
+ TargetKind::CustomBuild => "build-script",
+ };
+ let flavor = if unit.profile.test {
+ "test-"
+ } else if unit.profile.doc {
+ "doc-"
+ } else {
+ ""
+ };
+ format!("{}{}-{}", flavor, kind, file_stem)
+}
+
+/// Parses the dep-info file coming out of rustc into a Cargo-specific format.
+///
+/// This function will parse `rustc_dep_info` as a makefile-style dep info to
+/// learn about the all files which a crate depends on. This is then
+/// re-serialized into the `cargo_dep_info` path in a Cargo-specific format.
+///
+/// The `pkg_root` argument here is the absolute path to the directory
+/// containing `Cargo.toml` for this crate that was compiled. The paths listed
+/// in the rustc dep-info file may or may not be absolute but we'll want to
+/// consider all of them relative to the `root` specified.
+///
+/// The `rustc_cwd` argument is the absolute path to the cwd of the compiler
+/// when it was invoked.
+///
+/// The serialized Cargo format will contain a list of files, all of which are
+/// relative if they're under `root`, or absolute if they're elsewhere.
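+///
+/// For example (an illustrative sketch), a rustc dep-info line such as
+///
+/// ```text
+/// target/debug/foo: src/lib.rs src/other.rs
+/// ```
+///
+/// is re-serialized as the entries `src/lib.rs` and `src/other.rs` (made
+/// relative to `pkg_root`), each terminated by a NUL byte.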
+pub fn translate_dep_info(
+ rustc_dep_info: &Path,
+ cargo_dep_info: &Path,
+ pkg_root: &Path,
+ rustc_cwd: &Path,
+) -> CargoResult<()> {
+ let target = parse_rustc_dep_info(rustc_dep_info)?;
+ let deps = &target
+ .get(0)
+ .ok_or_else(|| internal("malformed dep-info format, no targets".to_string()))?
+ .1;
+
+ let mut new_contents = Vec::new();
+ for file in deps {
+ let absolute = rustc_cwd.join(file);
+ let path = absolute.strip_prefix(pkg_root).unwrap_or(&absolute);
+ new_contents.extend(util::path2bytes(path)?);
+ new_contents.push(0);
+ }
+ paths::write(cargo_dep_info, &new_contents)?;
+ Ok(())
+}
+
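+/// Parses the makefile-style dep-info file emitted by rustc.
+///
+/// For example (illustrative), the line
+/// `target/debug/foo: src/lib.rs src/some\ file.rs` parses to the pair
+/// `("target/debug/foo", ["src/lib.rs", "src/some file.rs"])`; a trailing
+/// backslash on a token escapes a space inside a single path.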
+pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult<Vec<(String, Vec<String>)>> {
+ let contents = paths::read(rustc_dep_info)?;
+ contents
+ .lines()
+ .filter_map(|l| l.find(": ").map(|i| (l, i)))
+ .map(|(line, pos)| {
+ let target = &line[..pos];
+ let mut deps = line[pos + 2..].split_whitespace();
+
+ let mut ret = Vec::new();
+ while let Some(s) = deps.next() {
+ let mut file = s.to_string();
+ while file.ends_with('\\') {
+ file.pop();
+ file.push(' ');
+ file.push_str(deps.next().ok_or_else(|| {
+ internal("malformed dep-info format, trailing \\".to_string())
+ })?);
+ }
+ ret.push(file);
+ }
+ Ok((target.to_string(), ret))
+ })
+ .collect()
+}
--- /dev/null
+use std::fmt;
+
+use util::{CargoResult, Dirty, Fresh, Freshness};
+use super::job_queue::JobState;
+
+pub struct Job {
+ dirty: Work,
+ fresh: Work,
+}
+
+/// Each proc should send its description before starting.
+/// It should send either once or close immediately.
+pub struct Work {
+ inner: Box<for<'a, 'b> FnBox<&'a JobState<'b>, CargoResult<()>> + Send>,
+}
+
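+/// A stand-in for `Box<FnOnce>`: a boxed `FnOnce` closure cannot be invoked
+/// directly on stable Rust of this era, so `call_box` takes the box by
+/// value, moves the closure out, and calls it.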
+trait FnBox<A, R> {
+ fn call_box(self: Box<Self>, a: A) -> R;
+}
+
+impl<A, R, F: FnOnce(A) -> R> FnBox<A, R> for F {
+ fn call_box(self: Box<F>, a: A) -> R {
+ (*self)(a)
+ }
+}
+
+impl Work {
+ pub fn new<F>(f: F) -> Work
+ where
+ F: FnOnce(&JobState) -> CargoResult<()> + Send + 'static,
+ {
+ Work { inner: Box::new(f) }
+ }
+
+ pub fn noop() -> Work {
+ Work::new(|_| Ok(()))
+ }
+
+ pub fn call(self, tx: &JobState) -> CargoResult<()> {
+ self.inner.call_box(tx)
+ }
+
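+ /// Chains two units of work: `self` runs first, and if it succeeds `next`
+ /// runs with the same `JobState`.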
+ pub fn then(self, next: Work) -> Work {
+ Work::new(move |state| {
+ self.call(state)?;
+ next.call(state)
+ })
+ }
+}
+
+impl Job {
+ /// Create a new job representing a unit of work.
+ pub fn new(dirty: Work, fresh: Work) -> Job {
+ Job { dirty, fresh }
+ }
+
+ /// Consumes this job by running it, returning the result of the
+ /// computation.
+ pub fn run(self, fresh: Freshness, state: &JobState) -> CargoResult<()> {
+ match fresh {
+ Fresh => self.fresh.call(state),
+ Dirty => self.dirty.call(state),
+ }
+ }
+}
+
+impl fmt::Debug for Job {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "Job {{ ... }}")
+ }
+}
--- /dev/null
+use std::collections::HashSet;
+use std::collections::hash_map::HashMap;
+use std::fmt;
+use std::io;
+use std::mem;
+use std::sync::mpsc::{channel, Receiver, Sender};
+
+use crossbeam::{self, Scope};
+use jobserver::{Acquired, HelperThread};
+
+use core::{PackageId, Profile, Target};
+use util::{Config, DependencyQueue, Dirty, Fresh, Freshness};
+use util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder};
+use handle_error;
+
+use super::{Context, Kind, Unit};
+use super::job::Job;
+
+/// A management structure of the entire dependency graph to compile.
+///
+/// This structure is backed by the `DependencyQueue` type and manages the
+/// actual compilation step of each package. Packages enqueue units of work and
+/// then later on the entire graph is processed and compiled.
+pub struct JobQueue<'a> {
+ queue: DependencyQueue<Key<'a>, Vec<(Job, Freshness)>>,
+ tx: Sender<Message<'a>>,
+ rx: Receiver<Message<'a>>,
+ active: usize,
+ pending: HashMap<Key<'a>, PendingBuild>,
+ compiled: HashSet<&'a PackageId>,
+ documented: HashSet<&'a PackageId>,
+ counts: HashMap<&'a PackageId, usize>,
+ is_release: bool,
+}
+
+/// A helper structure for metadata about the state of a building package.
+struct PendingBuild {
+ /// Number of jobs currently active
+ amt: usize,
+ /// Current freshness state of this package. Any dirty target within a
+ /// package will cause the entire package to become dirty.
+ fresh: Freshness,
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+struct Key<'a> {
+ pkg: &'a PackageId,
+ target: &'a Target,
+ profile: &'a Profile,
+ kind: Kind,
+}
+
+pub struct JobState<'a> {
+ tx: Sender<Message<'a>>,
+}
+
+enum Message<'a> {
+ Run(String),
+ Stdout(String),
+ Stderr(String),
+ Token(io::Result<Acquired>),
+ Finish(Key<'a>, CargoResult<()>),
+}
+
+impl<'a> JobState<'a> {
+ pub fn running(&self, cmd: &ProcessBuilder) {
+ let _ = self.tx.send(Message::Run(cmd.to_string()));
+ }
+
+ pub fn stdout(&self, out: &str) {
+ let _ = self.tx.send(Message::Stdout(out.to_string()));
+ }
+
+ pub fn stderr(&self, err: &str) {
+ let _ = self.tx.send(Message::Stderr(err.to_string()));
+ }
+}
+
+impl<'a> JobQueue<'a> {
+ pub fn new<'cfg>(cx: &Context<'a, 'cfg>) -> JobQueue<'a> {
+ let (tx, rx) = channel();
+ JobQueue {
+ queue: DependencyQueue::new(),
+ tx,
+ rx,
+ active: 0,
+ pending: HashMap::new(),
+ compiled: HashSet::new(),
+ documented: HashSet::new(),
+ counts: HashMap::new(),
+ is_release: cx.build_config.release,
+ }
+ }
+
+ pub fn enqueue<'cfg>(
+ &mut self,
+ cx: &Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+ job: Job,
+ fresh: Freshness,
+ ) -> CargoResult<()> {
+ let key = Key::new(unit);
+ let deps = key.dependencies(cx)?;
+ self.queue
+ .queue(Fresh, key, Vec::new(), &deps)
+ .push((job, fresh));
+ *self.counts.entry(key.pkg).or_insert(0) += 1;
+ Ok(())
+ }
+
+ /// Execute all jobs necessary to build the dependency graph.
+ ///
+ /// This function will spawn off `config.jobs()` workers to build all of the
+ /// necessary dependencies, in order. Freshness is propagated as far as
+ /// possible along each dependency chain.
+ pub fn execute(&mut self, cx: &mut Context) -> CargoResult<()> {
+ let _p = profile::start("executing the job graph");
+ self.queue.queue_finished();
+
+ // We need to give a handle to the send half of our message queue to the
+ // jobserver helper thread. Unfortunately though we need the handle to be
+ // `'static` as that's typically what's required when spawning a
+ // thread!
+ //
+ // To work around this we transmute the `Sender` to a static lifetime.
+ // We're only sending "longer living" messages, and we should also
+ // destroy all references to the channel before this function exits as
+ // the destructor for the `helper` object will ensure the associated
+ // thread is no longer running.
+ //
+ // As a result, this `transmute` to a longer lifetime should be safe in
+ // practice.
+ let tx = self.tx.clone();
+ let tx = unsafe { mem::transmute::<Sender<Message<'a>>, Sender<Message<'static>>>(tx) };
+ let helper = cx.jobserver
+ .clone()
+ .into_helper_thread(move |token| {
+ drop(tx.send(Message::Token(token)));
+ })
+ .chain_err(|| "failed to create helper thread for jobserver management")?;
+
+ crossbeam::scope(|scope| self.drain_the_queue(cx, scope, &helper))
+ }
+
+ fn drain_the_queue(
+ &mut self,
+ cx: &mut Context,
+ scope: &Scope<'a>,
+ jobserver_helper: &HelperThread,
+ ) -> CargoResult<()> {
+ let mut tokens = Vec::new();
+ let mut queue = Vec::new();
+ trace!("queue: {:#?}", self.queue);
+
+ // Iteratively execute the entire dependency graph. Each turn of the
+ // loop starts out by scheduling as much work as possible (up to the
+ // maximum number of parallel jobs we have tokens for). A local queue
+ // is maintained separately from the main dependency queue as one
+ // dequeue may actually dequeue quite a bit of work (e.g. 10 binaries
+ // in one project).
+ //
+ // After a job has finished we update our internal state if it was
+ // successful and otherwise wait for pending work to finish if it failed
+ // and then immediately return.
+ let mut error = None;
+ loop {
+ // Dequeue as much work as we can, learning about everything
+ // possible that can run. Note that this is also the point where we
+ // start requesting job tokens. Each job after the first needs to
+ // request a token.
+ while let Some((fresh, key, jobs)) = self.queue.dequeue() {
+ let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| f.combine(fresh));
+ self.pending.insert(
+ key,
+ PendingBuild {
+ amt: jobs.len(),
+ fresh: total_fresh,
+ },
+ );
+ for (job, f) in jobs {
+ queue.push((key, job, f.combine(fresh)));
+ if self.active + queue.len() > 1 {
+ jobserver_helper.request_token();
+ }
+ }
+ }
+
+ // Now that we've learned of all possible work that we can execute
+ // try to spawn it so long as we've got a jobserver token which says
+ // we're able to perform some parallel work.
+ while error.is_none() && self.active < tokens.len() + 1 && !queue.is_empty() {
+ let (key, job, fresh) = queue.remove(0);
+ self.run(key, fresh, job, cx.config, scope)?;
+ }
+
+ // If after all that we're not actually running anything then we're
+ // done!
+ if self.active == 0 {
+ break;
+ }
+
+ // And finally, before we block waiting for the next event, drop any
+ // excess tokens we may have accidentally acquired. Due to how our
+ // jobserver interface is architected we may acquire a token that we
+ // don't actually use, and if this happens just relinquish it back
+ // to the jobserver itself.
+ tokens.truncate(self.active - 1);
+
+ match self.rx.recv().unwrap() {
+ Message::Run(cmd) => {
+ cx.config.shell().verbose(|c| c.status("Running", &cmd))?;
+ }
+ Message::Stdout(out) => {
+ if cx.config.extra_verbose() {
+ println!("{}", out);
+ }
+ }
+ Message::Stderr(err) => {
+ if cx.config.extra_verbose() {
+ writeln!(cx.config.shell().err(), "{}", err)?;
+ }
+ }
+ Message::Finish(key, result) => {
+ info!("end: {:?}", key);
+ self.active -= 1;
+ if self.active > 0 {
+ assert!(!tokens.is_empty());
+ drop(tokens.pop());
+ }
+ match result {
+ Ok(()) => self.finish(key, cx)?,
+ Err(e) => {
+ let msg = "The following warnings were emitted during compilation:";
+ self.emit_warnings(Some(msg), key, cx)?;
+
+ if self.active > 0 {
+ error = Some(format_err!("build failed"));
+ handle_error(e, &mut *cx.config.shell());
+ cx.config.shell().warn(
+ "build failed, waiting for other \
+ jobs to finish...",
+ )?;
+ } else {
+ error = Some(e);
+ }
+ }
+ }
+ }
+ Message::Token(acquired_token) => {
+ tokens.push(acquired_token.chain_err(|| "failed to acquire jobserver token")?);
+ }
+ }
+ }
+
+ let build_type = if self.is_release { "release" } else { "dev" };
+ let profile = cx.lib_profile();
+ let mut opt_type = String::from(if profile.opt_level == "0" {
+ "unoptimized"
+ } else {
+ "optimized"
+ });
+ if profile.debuginfo.is_some() {
+ opt_type += " + debuginfo";
+ }
+ let duration = cx.config.creation_time().elapsed();
+        // `subsec_nanos() / 10_000_000` yields centiseconds; zero-pad them so
+        // that e.g. 1.05 secs doesn't render as "1.5 secs".
+        let time_elapsed = format!(
+            "{}.{:02} secs",
+            duration.as_secs(),
+            duration.subsec_nanos() / 10_000_000
+        );
+ if self.queue.is_empty() {
+ let message = format!(
+ "{} [{}] target(s) in {}",
+ build_type, opt_type, time_elapsed
+ );
+ cx.config.shell().status("Finished", message)?;
+ Ok(())
+ } else if let Some(e) = error {
+ Err(e)
+ } else {
+ debug!("queue: {:#?}", self.queue);
+ Err(internal("finished with jobs still left in the queue"))
+ }
+ }
+
+    /// Executes a job: a `Fresh` job is run inline, while a `Dirty` job is
+    /// spawned onto the `scope` given.
+ fn run(
+ &mut self,
+ key: Key<'a>,
+ fresh: Freshness,
+ job: Job,
+ config: &Config,
+ scope: &Scope<'a>,
+ ) -> CargoResult<()> {
+ info!("start: {:?}", key);
+
+ self.active += 1;
+ *self.counts.get_mut(key.pkg).unwrap() -= 1;
+
+ let my_tx = self.tx.clone();
+ let doit = move || {
+ let res = job.run(fresh, &JobState { tx: my_tx.clone() });
+ my_tx.send(Message::Finish(key, res)).unwrap();
+ };
+ match fresh {
+ Freshness::Fresh => doit(),
+ Freshness::Dirty => {
+ scope.spawn(doit);
+ }
+ }
+
+ // Print out some nice progress information
+ self.note_working_on(config, &key, fresh)?;
+
+ Ok(())
+ }
+
+ fn emit_warnings(&self, msg: Option<&str>, key: Key<'a>, cx: &mut Context) -> CargoResult<()> {
+ let output = cx.build_state.outputs.lock().unwrap();
+ if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) {
+ if let Some(msg) = msg {
+ if !output.warnings.is_empty() {
+ writeln!(cx.config.shell().err(), "{}\n", msg)?;
+ }
+ }
+
+ for warning in output.warnings.iter() {
+ cx.config.shell().warn(warning)?;
+ }
+
+ if !output.warnings.is_empty() && msg.is_some() {
+ // Output an empty line.
+                writeln!(cx.config.shell().err())?;
+ }
+ }
+
+ Ok(())
+ }
+
+ fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> {
+ if key.profile.run_custom_build && cx.show_warnings(key.pkg) {
+ self.emit_warnings(None, key, cx)?;
+ }
+
+ let state = self.pending.get_mut(&key).unwrap();
+ state.amt -= 1;
+ if state.amt == 0 {
+ self.queue.finish(&key, state.fresh);
+ }
+ Ok(())
+ }
+
+ // This isn't super trivial because we don't want to print loads and
+ // loads of information to the console, but we also want to produce a
+ // faithful representation of what's happening. This is somewhat nuanced
+ // as a package can start compiling *very* early on because of custom
+ // build commands and such.
+ //
+ // In general, we try to print "Compiling" for the first nontrivial task
+ // run for a package, regardless of when that is. We then don't print
+ // out any more information for a package after we've printed it once.
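+    //
+    // For example, if a package's custom build script is the first of its
+    // units to run, "Compiling" is printed at that point, and the package's
+    // later library and binary jobs print nothing further.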
+ fn note_working_on(
+ &mut self,
+ config: &Config,
+ key: &Key<'a>,
+ fresh: Freshness,
+ ) -> CargoResult<()> {
+ if (self.compiled.contains(key.pkg) && !key.profile.doc)
+ || (self.documented.contains(key.pkg) && key.profile.doc)
+ {
+ return Ok(());
+ }
+
+ match fresh {
+ // Any dirty stage which runs at least one command gets printed as
+ // being a compiled package
+ Dirty => {
+ if key.profile.doc {
+ if !key.profile.test {
+ self.documented.insert(key.pkg);
+ config.shell().status("Documenting", key.pkg)?;
+ }
+ } else {
+ self.compiled.insert(key.pkg);
+ config.shell().status("Compiling", key.pkg)?;
+ }
+ }
+ Fresh if self.counts[key.pkg] == 0 => {
+ self.compiled.insert(key.pkg);
+ config.shell().verbose(|c| c.status("Fresh", key.pkg))?;
+ }
+ Fresh => {}
+ }
+ Ok(())
+ }
+}
+
+impl<'a> Key<'a> {
+ fn new(unit: &Unit<'a>) -> Key<'a> {
+ Key {
+ pkg: unit.pkg.package_id(),
+ target: unit.target,
+ profile: unit.profile,
+ kind: unit.kind,
+ }
+ }
+
+ fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) -> CargoResult<Vec<Key<'a>>> {
+ let unit = Unit {
+ pkg: cx.get_package(self.pkg)?,
+ target: self.target,
+ profile: self.profile,
+ kind: self.kind,
+ };
+ let targets = cx.dep_targets(&unit);
+ Ok(targets
+ .iter()
+ .filter_map(|unit| {
+ // Binaries aren't actually needed to *compile* tests, just to run
+ // them, so we don't include this dependency edge in the job graph.
+ if self.target.is_test() && unit.target.is_bin() {
+ None
+ } else {
+ Some(Key::new(unit))
+ }
+ })
+ .collect())
+ }
+}
+
+impl<'a> fmt::Debug for Key<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(
+ f,
+ "{} => {}/{} => {:?}",
+ self.pkg, self.target, self.profile, self.kind
+ )
+ }
+}
--- /dev/null
+//! Management of the directory layout of a build
+//!
+//! The directory layout is a little tricky at times, hence a separate file to
+//! house this logic. The current layout looks like this:
+//!
+//! ```ignore
+//! # This is the root directory for all output, the top-level package
+//! # places all of its output here.
+//! target/
+//!
+//! # This is the root directory for all output of *dependencies*
+//! deps/
+//!
+//! # Root directory for all compiled examples
+//! examples/
+//!
+//! # This is the location at which the output of all custom build
+//! # commands are rooted
+//! build/
+//!
+//! # Each package gets its own directory where its build script and
+//! # script output are placed
+//! $pkg1/
+//! $pkg2/
+//! $pkg3/
+//!
+//! # Each package directory has an `out` directory where the
+//! # build script's output is placed.
+//! out/
+//!
+//! # This is the location at which the output of all old custom build
+//! # commands are rooted
+//! native/
+//!
+//! # Each package gets its own directory for where its output is
+//! # placed. We can't track exactly what's getting put in here, so
+//! # we just assume that all relevant output is in these
+//! # directories.
+//! $pkg1/
+//! $pkg2/
+//! $pkg3/
+//!
+//! # Directory used to store incremental data for the compiler (when
+//! # incremental compilation is enabled).
+//! incremental/
+//!
+//! # Hidden directory that holds all of the fingerprint files for all
+//! # packages
+//! .fingerprint/
+//! ```
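+//!
+//! A sketch of typical use (illustrative):
+//!
+//! ```ignore
+//! let mut layout = Layout::new(ws, None, "debug")?; // locks target/debug
+//! layout.prepare()?;                                // creates the dirs above
+//! let deps_dir = layout.deps();                     // target/debug/deps
+//! ```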
+
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+
+use core::Workspace;
+use util::{CargoResult, Config, FileLock, Filesystem};
+
+/// Contains the paths of all target output locations.
+///
+/// See module docs for more information.
+pub struct Layout {
+ root: PathBuf,
+ deps: PathBuf,
+ native: PathBuf,
+ build: PathBuf,
+ incremental: PathBuf,
+ fingerprint: PathBuf,
+ examples: PathBuf,
+    /// The lockfile for a build; will be unlocked when this struct is `drop`ped.
+ _lock: FileLock,
+}
+
+pub fn is_bad_artifact_name(name: &str) -> bool {
+ ["deps", "examples", "build", "native", "incremental"]
+ .iter()
+ .any(|&reserved| reserved == name)
+}
+
+impl Layout {
+ /// Calculate the paths for build output, lock the build directory, and return as a Layout.
+ ///
+ /// This function will block if the directory is already locked.
+ ///
+ /// Differs from `at` in that this calculates the root path from the workspace target directory,
+ /// adding the target triple and the profile (debug, release, ...).
+ pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CargoResult<Layout> {
+ let mut path = ws.target_dir();
+ // Flexible target specifications often point at filenames, so interpret
+ // the target triple as a Path and then just use the file stem as the
+ // component for the directory name.
+ if let Some(triple) = triple {
+ path.push(Path::new(triple)
+ .file_stem()
+ .ok_or_else(|| format_err!("invalid target"))?);
+ }
+ path.push(dest);
+ Layout::at(ws.config(), path)
+ }
+
+ /// Calculate the paths for build output, lock the build directory, and return as a Layout.
+ ///
+ /// This function will block if the directory is already locked.
+ pub fn at(config: &Config, root: Filesystem) -> CargoResult<Layout> {
+ // For now we don't do any more finer-grained locking on the artifact
+ // directory, so just lock the entire thing for the duration of this
+ // compile.
+ let lock = root.open_rw(".cargo-lock", config, "build directory")?;
+ let root = root.into_path_unlocked();
+
+ Ok(Layout {
+ deps: root.join("deps"),
+ native: root.join("native"),
+ build: root.join("build"),
+ incremental: root.join("incremental"),
+ fingerprint: root.join(".fingerprint"),
+ examples: root.join("examples"),
+ root,
+ _lock: lock,
+ })
+ }
+
+ #[cfg(not(target_os = "macos"))]
+ fn exclude_from_backups(&self, _: &Path) {}
+
+ #[cfg(target_os = "macos")]
+ /// Marks files or directories as excluded from Time Machine on macOS
+ ///
+ /// This is recommended to prevent derived/temporary files from bloating backups.
+ fn exclude_from_backups(&self, path: &Path) {
+ use std::ptr;
+ use core_foundation::{number, string, url};
+ use core_foundation::base::TCFType;
+
+        // For compatibility with 10.7, a string is used instead of the global
+        // kCFURLIsExcludedFromBackupKey.
+ let is_excluded_key: Result<string::CFString, _> = "NSURLIsExcludedFromBackupKey".parse();
+ match (url::CFURL::from_path(path, false), is_excluded_key) {
+ (Some(path), Ok(is_excluded_key)) => unsafe {
+ url::CFURLSetResourcePropertyForKey(
+ path.as_concrete_TypeRef(),
+ is_excluded_key.as_concrete_TypeRef(),
+ number::kCFBooleanTrue as *const _,
+ ptr::null_mut(),
+ );
+ },
+ // Errors are ignored, since it's an optional feature and failure
+ // doesn't prevent Cargo from working
+ _ => {}
+ }
+ }
+
+ /// Make sure all directories stored in the Layout exist on the filesystem.
+ pub fn prepare(&mut self) -> io::Result<()> {
+ if fs::metadata(&self.root).is_err() {
+ fs::create_dir_all(&self.root)?;
+ }
+
+ self.exclude_from_backups(&self.root);
+
+ mkdir(&self.deps)?;
+ mkdir(&self.native)?;
+ mkdir(&self.incremental)?;
+ mkdir(&self.fingerprint)?;
+ mkdir(&self.examples)?;
+ mkdir(&self.build)?;
+
+ return Ok(());
+
+ fn mkdir(dir: &Path) -> io::Result<()> {
+ if fs::metadata(&dir).is_err() {
+ fs::create_dir(dir)?;
+ }
+ Ok(())
+ }
+ }
+
+ /// Fetch the root path.
+ pub fn dest(&self) -> &Path {
+ &self.root
+ }
+ /// Fetch the deps path.
+ pub fn deps(&self) -> &Path {
+ &self.deps
+ }
+ /// Fetch the examples path.
+ pub fn examples(&self) -> &Path {
+ &self.examples
+ }
+ /// Fetch the root path.
+ pub fn root(&self) -> &Path {
+ &self.root
+ }
+ /// Fetch the incremental path.
+ pub fn incremental(&self) -> &Path {
+ &self.incremental
+ }
+ /// Fetch the fingerprint path.
+ pub fn fingerprint(&self) -> &Path {
+ &self.fingerprint
+ }
+ /// Fetch the build path.
+ pub fn build(&self) -> &Path {
+ &self.build
+ }
+}
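+
+// An illustrative sanity check: the rejected names are those that collide
+// with the non-hidden directories `prepare` creates inside the layout root.
+#[cfg(test)]
+mod tests {
+    use super::is_bad_artifact_name;
+
+    #[test]
+    fn rejects_reserved_artifact_names() {
+        assert!(is_bad_artifact_name("deps"));
+        assert!(is_bad_artifact_name("incremental"));
+        assert!(!is_bad_artifact_name("my-crate"));
+    }
+}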
--- /dev/null
+use std::collections::{HashMap, HashSet};
+use std::fmt::Write;
+
+use core::{PackageId, Resolve};
+use util::CargoResult;
+use super::Unit;
+
+#[derive(Default)]
+pub struct Links<'a> {
+ validated: HashSet<&'a PackageId>,
+ links: HashMap<String, &'a PackageId>,
+}
+
+impl<'a> Links<'a> {
+ pub fn new() -> Links<'a> {
+ Links {
+ validated: HashSet::new(),
+ links: HashMap::new(),
+ }
+ }
+
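+    /// Validates that `unit`'s package is the only one in the build graph
+    /// that links to its declared native library. For example (illustrative),
+    /// two packages that both declare `links = "git2"` in their manifests
+    /// cannot appear in the same dependency graph.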
+ pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'a>) -> CargoResult<()> {
+ if !self.validated.insert(unit.pkg.package_id()) {
+ return Ok(());
+ }
+ let lib = match unit.pkg.manifest().links() {
+ Some(lib) => lib,
+ None => return Ok(()),
+ };
+ if let Some(prev) = self.links.get(lib) {
+ let pkg = unit.pkg.package_id();
+
+ let describe_path = |pkgid: &PackageId| -> String {
+ let dep_path = resolve.path_to_top(pkgid);
+ let mut dep_path_desc = format!("package `{}`", dep_path[0]);
+ for dep in dep_path.iter().skip(1) {
+ write!(dep_path_desc, "\n ... which is depended on by `{}`", dep).unwrap();
+ }
+ dep_path_desc
+ };
+
+ bail!(
+ "multiple packages link to native library `{}`, \
+ but a native library can be linked only once\n\
+ \n\
+ {}\nlinks to native library `{}`\n\
+ \n\
+ {}\nalso links to native library `{}`",
+ lib,
+ describe_path(prev),
+ lib,
+ describe_path(pkg),
+ lib
+ )
+ }
+ if !unit.pkg
+ .manifest()
+ .targets()
+ .iter()
+ .any(|t| t.is_custom_build())
+ {
+ bail!(
+ "package `{}` specifies that it links to `{}` but does not \
+ have a custom build script",
+ unit.pkg.package_id(),
+ lib
+ )
+ }
+ self.links.insert(lib.to_string(), unit.pkg.package_id());
+ Ok(())
+ }
+}
--- /dev/null
+use std::collections::HashMap;
+use std::env;
+use std::ffi::{OsStr, OsString};
+use std::fs;
+use std::io::{self, Write};
+use std::path::{self, Path, PathBuf};
+use std::sync::Arc;
+
+use same_file::is_same_file;
+use serde_json;
+
+use core::{Feature, PackageId, Profile, Target};
+use core::manifest::Lto;
+use core::shell::ColorChoice;
+use util::{self, machine_message, ProcessBuilder};
+use util::{internal, join_paths, profile};
+use util::paths;
+use util::errors::{CargoResult, CargoResultExt, Internal};
+use util::Freshness;
+
+use self::job::{Job, Work};
+use self::job_queue::JobQueue;
+
+use self::output_depinfo::output_depinfo;
+
+pub use self::compilation::Compilation;
+pub use self::context::{Context, FileFlavor, Unit};
+pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts};
+pub use self::layout::is_bad_artifact_name;
+
+mod compilation;
+mod context;
+mod custom_build;
+mod fingerprint;
+mod job;
+mod job_queue;
+mod layout;
+mod links;
+mod output_depinfo;
+
+/// Whether an object is for the host arch, or the target arch.
+///
+/// These will be the same unless cross-compiling.
+#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord)]
+pub enum Kind {
+ Host,
+ Target,
+}
+
+/// Configuration information for a rustc build.
+#[derive(Default, Clone)]
+pub struct BuildConfig {
+ /// The host arch triple
+ ///
+    /// e.g. x86_64-unknown-linux-gnu, which breaks down as
+ /// - machine: x86_64
+ /// - hardware-platform: unknown
+ /// - operating system: linux-gnu
+ pub host_triple: String,
+ /// Build information for the host arch
+ pub host: TargetConfig,
+    /// The target arch triple; defaults to the host arch
+ pub requested_target: Option<String>,
+ /// Build information for the target
+ pub target: TargetConfig,
+ /// How many rustc jobs to run in parallel
+ pub jobs: u32,
+ /// Whether we are building for release
+ pub release: bool,
+ /// Whether we are running tests
+ pub test: bool,
+ /// Whether we are building documentation
+ pub doc_all: bool,
+    /// Whether to print the build output in JSON format (for machine reading)
+ pub json_messages: bool,
+}
+
+impl BuildConfig {
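+    /// Creates a `BuildConfig` for the given host triple and optional
+    /// requested target. A sketch of use (illustrative):
+    ///
+    /// ```ignore
+    /// let bc = BuildConfig::new("x86_64-unknown-linux-gnu", &None)?;
+    /// assert_eq!(bc.jobs, 1); // callers adjust this afterwards
+    /// ```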
+ pub fn new(host_triple: &str, requested_target: &Option<String>) -> CargoResult<BuildConfig> {
+ if let Some(ref s) = *requested_target {
+ if s.trim().is_empty() {
+ bail!("target was empty")
+ }
+ }
+ Ok(BuildConfig {
+ host_triple: host_triple.to_string(),
+ requested_target: (*requested_target).clone(),
+ jobs: 1,
+ ..Default::default()
+ })
+ }
+}
+
+/// Information required to build for a target
+#[derive(Clone, Default)]
+pub struct TargetConfig {
+ /// The path of archiver (lib builder) for this target.
+ pub ar: Option<PathBuf>,
+ /// The path of the linker for this target.
+ pub linker: Option<PathBuf>,
+ /// Special build options for any necessary input files (filename -> options)
+ pub overrides: HashMap<String, BuildOutput>,
+}
+
+/// A glorified callback for executing calls to rustc. Rather than calling rustc
+/// directly, we'll use an Executor, giving clients an opportunity to intercept
+/// the build calls.
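+///
+/// A minimal sketch of a custom implementation (illustrative; it relies on
+/// the trait's default method bodies below):
+///
+/// ```ignore
+/// struct LoggingExecutor;
+///
+/// impl Executor for LoggingExecutor {
+///     fn exec(&self, cmd: ProcessBuilder, id: &PackageId, _: &Target) -> CargoResult<()> {
+///         println!("running rustc for {}: {}", id, cmd);
+///         cmd.exec()?;
+///         Ok(())
+///     }
+/// }
+/// ```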
+pub trait Executor: Send + Sync + 'static {
+ /// Called after a rustc process invocation is prepared up-front for a given
+ /// unit of work (may still be modified for runtime-known dependencies, when
+ /// the work is actually executed).
+ fn init(&self, _cx: &Context, _unit: &Unit) {}
+
+ /// In case of an `Err`, Cargo will not continue with the build process for
+ /// this package.
+ fn exec(&self, cmd: ProcessBuilder, _id: &PackageId, _target: &Target) -> CargoResult<()> {
+ cmd.exec()?;
+ Ok(())
+ }
+
+ fn exec_json(
+ &self,
+ cmd: ProcessBuilder,
+ _id: &PackageId,
+ _target: &Target,
+ handle_stdout: &mut FnMut(&str) -> CargoResult<()>,
+ handle_stderr: &mut FnMut(&str) -> CargoResult<()>,
+ ) -> CargoResult<()> {
+ cmd.exec_with_streaming(handle_stdout, handle_stderr, false)?;
+ Ok(())
+ }
+
+ /// Queried when queuing each unit of work. If it returns true, then the
+ /// unit will always be rebuilt, independent of whether it needs to be.
+ fn force_rebuild(&self, _unit: &Unit) -> bool {
+ false
+ }
+}
+
+/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's
+/// default behaviour.
+#[derive(Copy, Clone)]
+pub struct DefaultExecutor;
+
+impl Executor for DefaultExecutor {}
+
+fn compile<'a, 'cfg: 'a>(
+ cx: &mut Context<'a, 'cfg>,
+ jobs: &mut JobQueue<'a>,
+ unit: &Unit<'a>,
+ exec: &Arc<Executor>,
+) -> CargoResult<()> {
+ if !cx.compiled.insert(*unit) {
+ return Ok(());
+ }
+
+ // Build up the work to be done to compile this unit, enqueuing it once
+ // we've got everything constructed.
+ let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name()));
+ fingerprint::prepare_init(cx, unit)?;
+ cx.links.validate(cx.resolve, unit)?;
+
+ let (dirty, fresh, freshness) = if unit.profile.run_custom_build {
+ custom_build::prepare(cx, unit)?
+ } else if unit.profile.doc && unit.profile.test {
+ // we run these targets later, so this is just a noop for now
+ (Work::noop(), Work::noop(), Freshness::Fresh)
+ } else {
+ let (mut freshness, dirty, fresh) = fingerprint::prepare_target(cx, unit)?;
+ let work = if unit.profile.doc {
+ rustdoc(cx, unit)?
+ } else {
+ rustc(cx, unit, exec)?
+ };
+ // Need to link targets on both the dirty and fresh
+ let dirty = work.then(link_targets(cx, unit, false)?).then(dirty);
+ let fresh = link_targets(cx, unit, true)?.then(fresh);
+
+ if exec.force_rebuild(unit) {
+ freshness = Freshness::Dirty;
+ }
+
+ (dirty, fresh, freshness)
+ };
+ jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?;
+ drop(p);
+
+ // Be sure to compile all dependencies of this target as well.
+ for unit in cx.dep_targets(unit).iter() {
+ compile(cx, jobs, unit, exec)?;
+ }
+
+ Ok(())
+}
+
+fn rustc<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+ exec: &Arc<Executor>,
+) -> CargoResult<Work> {
+ let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?;
+
+ let name = unit.pkg.name().to_string();
+
+ // If this is an upstream dep we don't want warnings from, turn off all
+ // lints.
+ if !cx.show_warnings(unit.pkg.package_id()) {
+ rustc.arg("--cap-lints").arg("allow");
+
+ // If this is an upstream dep but we *do* want warnings, make sure that they
+ // don't fail compilation.
+ } else if !unit.pkg.package_id().source_id().is_path() {
+ rustc.arg("--cap-lints").arg("warn");
+ }
+
+ let outputs = cx.outputs(unit)?;
+ let root = cx.files().out_dir(unit);
+ let kind = unit.kind;
+
+ // Prepare the native lib state (extra -L and -l flags)
+ let build_state = cx.build_state.clone();
+ let current_id = unit.pkg.package_id().clone();
+ let build_deps = load_build_deps(cx, unit);
+
+ // If we are a binary and the package also contains a library, then we
+ // don't pass the `-l` flags.
+ let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib());
+ let do_rename = unit.target.allows_underscores() && !unit.profile.test;
+ let real_name = unit.target.name().to_string();
+ let crate_name = unit.target.crate_name();
+
+    // XXX: rely on the target_filenames iterator as the source of truth
+    // rather than re-deriving the file stem here.
+ let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() {
+ root.join(&crate_name)
+ } else {
+ root.join(&cx.files().file_stem(unit))
+ }.with_extension("d");
+ let dep_info_loc = fingerprint::dep_info_loc(cx, unit);
+
+ rustc.args(&cx.rustflags_args(unit)?);
+ let json_messages = cx.build_config.json_messages;
+ let package_id = unit.pkg.package_id().clone();
+ let target = unit.target.clone();
+
+ exec.init(cx, unit);
+ let exec = exec.clone();
+
+ let root_output = cx.files().target_root().to_path_buf();
+ let pkg_root = unit.pkg.root().to_path_buf();
+ let cwd = rustc
+ .get_cwd()
+ .unwrap_or_else(|| cx.config.cwd())
+ .to_path_buf();
+
+ return Ok(Work::new(move |state| {
+ // Only at runtime have we discovered what the extra -L and -l
+ // arguments are for native libraries, so we process those here. We
+ // also need to be sure to add any -L paths for our plugins to the
+ // dynamic library load path as a plugin's dynamic library may be
+ // located somewhere in there.
+ // Finally, if custom environment variables have been produced by
+ // previous build scripts, we include them in the rustc invocation.
+ if let Some(build_deps) = build_deps {
+ let build_state = build_state.outputs.lock().unwrap();
+ add_native_deps(
+ &mut rustc,
+ &build_state,
+ &build_deps,
+ pass_l_flag,
+ ¤t_id,
+ )?;
+ add_plugin_deps(&mut rustc, &build_state, &build_deps, &root_output)?;
+ add_custom_env(&mut rustc, &build_state, ¤t_id, kind)?;
+ }
+
+ for output in outputs.iter() {
+ // If there is both an rmeta and rlib, rustc will prefer to use the
+ // rlib, even if it is older. Therefore, we must delete the rlib to
+ // force using the new rmeta.
+ if output.path.extension() == Some(OsStr::new("rmeta")) {
+ let dst = root.join(&output.path).with_extension("rlib");
+ if dst.exists() {
+ paths::remove_file(&dst)?;
+ }
+ }
+ }
+
+ state.running(&rustc);
+ if json_messages {
+ exec.exec_json(
+ rustc,
+ &package_id,
+ &target,
+ &mut |line| {
+ if !line.is_empty() {
+ Err(internal(&format!(
+ "compiler stdout is not empty: `{}`",
+ line
+ )))
+ } else {
+ Ok(())
+ }
+ },
+ &mut |line| {
+ // stderr from rustc can have a mix of JSON and non-JSON output
+ if line.starts_with('{') {
+ // Handle JSON lines
+ let compiler_message = serde_json::from_str(line).map_err(|_| {
+ internal(&format!("compiler produced invalid json: `{}`", line))
+ })?;
+
+ machine_message::emit(&machine_message::FromCompiler {
+ package_id: &package_id,
+ target: &target,
+ message: compiler_message,
+ });
+ } else {
+ // Forward non-JSON to stderr
+ writeln!(io::stderr(), "{}", line)?;
+ }
+ Ok(())
+ },
+ ).chain_err(|| format!("Could not compile `{}`.", name))?;
+ } else {
+ exec.exec(rustc, &package_id, &target)
+ .map_err(Internal::new)
+ .chain_err(|| format!("Could not compile `{}`.", name))?;
+ }
+
+ if do_rename && real_name != crate_name {
+ let dst = &outputs[0].path;
+ let src = dst.with_file_name(
+ dst.file_name()
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .replace(&real_name, &crate_name),
+ );
+ if src.exists() && src.file_name() != dst.file_name() {
+ fs::rename(&src, &dst)
+ .chain_err(|| internal(format!("could not rename crate {:?}", src)))?;
+ }
+ }
+
+ if rustc_dep_info_loc.exists() {
+ fingerprint::translate_dep_info(&rustc_dep_info_loc, &dep_info_loc, &pkg_root, &cwd)
+ .chain_err(|| {
+ internal(format!(
+ "could not parse/generate dep info at: {}",
+ rustc_dep_info_loc.display()
+ ))
+ })?;
+ }
+
+ Ok(())
+ }));
+
+ // Add all relevant -L and -l flags from dependencies (now calculated and
+ // present in `state`) to the command provided
+ fn add_native_deps(
+ rustc: &mut ProcessBuilder,
+ build_state: &BuildMap,
+ build_scripts: &BuildScripts,
+ pass_l_flag: bool,
+ current_id: &PackageId,
+ ) -> CargoResult<()> {
+ for key in build_scripts.to_link.iter() {
+ let output = build_state.get(key).ok_or_else(|| {
+ internal(format!(
+ "couldn't find build state for {}/{:?}",
+ key.0, key.1
+ ))
+ })?;
+ for path in output.library_paths.iter() {
+ rustc.arg("-L").arg(path);
+ }
+ if key.0 == *current_id {
+ for cfg in &output.cfgs {
+ rustc.arg("--cfg").arg(cfg);
+ }
+ if pass_l_flag {
+ for name in output.library_links.iter() {
+ rustc.arg("-l").arg(name);
+ }
+ }
+ }
+ }
+ Ok(())
+ }
+
+ // Add all custom environment variables present in `state` (after they've
+ // been put there by one of the `build_scripts`) to the command provided.
+ fn add_custom_env(
+ rustc: &mut ProcessBuilder,
+ build_state: &BuildMap,
+ current_id: &PackageId,
+ kind: Kind,
+ ) -> CargoResult<()> {
+ let key = (current_id.clone(), kind);
+ if let Some(output) = build_state.get(&key) {
+ for &(ref name, ref value) in output.env.iter() {
+ rustc.env(name, value);
+ }
+ }
+ Ok(())
+ }
+}
+
+/// Link the compiled target (often of the form `foo-{metadata_hash}`) to the
+/// final target. This must happen during both "Fresh" and "Compile".
+fn link_targets<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+ fresh: bool,
+) -> CargoResult<Work> {
+ let outputs = cx.outputs(unit)?;
+ let export_dir = cx.files().export_dir(unit);
+ let package_id = unit.pkg.package_id().clone();
+ let target = unit.target.clone();
+ let profile = unit.profile.clone();
+ let features = cx.resolve
+ .features_sorted(&package_id)
+ .into_iter()
+ .map(|s| s.to_owned())
+ .collect();
+ let json_messages = cx.build_config.json_messages;
+
+ Ok(Work::new(move |_| {
+ // If we're a "root crate", e.g. the target of this compilation, then we
+ // hard link our outputs out of the `deps` directory into the directory
+ // above. This means that `cargo build` will produce binaries in
+ // `target/debug` which one probably expects.
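+        //
+        // For example (illustrative): `target/debug/deps/foo-0f3c6d1a2b` is
+        // hard-linked to `target/debug/foo`.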
+ let mut destinations = vec![];
+ for output in outputs.iter() {
+ let src = &output.path;
+ // This may have been a `cargo rustc` command which changes the
+ // output, so the source may not actually exist.
+ if !src.exists() {
+ continue;
+ }
+ let dst = match output.hardlink.as_ref() {
+ Some(dst) => dst,
+ None => {
+ destinations.push(src.display().to_string());
+ continue;
+ }
+ };
+ destinations.push(dst.display().to_string());
+ hardlink_or_copy(src, dst)?;
+ if let Some(ref path) = export_dir {
+ if !path.exists() {
+ fs::create_dir_all(path)?;
+ }
+
+ hardlink_or_copy(src, &path.join(dst.file_name().unwrap()))?;
+ }
+ }
+
+ if json_messages {
+ machine_message::emit(&machine_message::Artifact {
+ package_id: &package_id,
+ target: &target,
+ profile: &profile,
+ features,
+ filenames: destinations,
+ fresh,
+ });
+ }
+ Ok(())
+ }))
+}
+
+fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> {
+ debug!("linking {} to {}", src.display(), dst.display());
+ if is_same_file(src, dst).unwrap_or(false) {
+ return Ok(());
+ }
+ if dst.exists() {
+ paths::remove_file(&dst)?;
+ }
+
+ let link_result = if src.is_dir() {
+ #[cfg(unix)]
+ use std::os::unix::fs::symlink;
+ #[cfg(target_os = "redox")]
+ use std::os::redox::fs::symlink;
+ #[cfg(windows)]
+ use std::os::windows::fs::symlink_dir as symlink;
+
+ let dst_dir = dst.parent().unwrap();
+ let src = if src.starts_with(dst_dir) {
+ src.strip_prefix(dst_dir).unwrap()
+ } else {
+ src
+ };
+ symlink(src, dst)
+ } else {
+ fs::hard_link(src, dst)
+ };
+ link_result
+ .or_else(|err| {
+ debug!("link failed {}. falling back to fs::copy", err);
+ fs::copy(src, dst).map(|_| ())
+ })
+ .chain_err(|| {
+ format!(
+ "failed to link or copy `{}` to `{}`",
+ src.display(),
+ dst.display()
+ )
+ })?;
+ Ok(())
+}
+
+fn load_build_deps(cx: &Context, unit: &Unit) -> Option<Arc<BuildScripts>> {
+ cx.build_scripts.get(unit).cloned()
+}
+
+// For all plugin dependencies, add their -L paths (now calculated and
+// present in `state`) to the dynamic library load path for the command to
+// execute.
+fn add_plugin_deps(
+ rustc: &mut ProcessBuilder,
+ build_state: &BuildMap,
+ build_scripts: &BuildScripts,
+ root_output: &PathBuf,
+) -> CargoResult<()> {
+ let var = util::dylib_path_envvar();
+ let search_path = rustc.get_env(var).unwrap_or_default();
+ let mut search_path = env::split_paths(&search_path).collect::<Vec<_>>();
+ for id in build_scripts.plugins.iter() {
+ let key = (id.clone(), Kind::Host);
+ let output = build_state
+ .get(&key)
+ .ok_or_else(|| internal(format!("couldn't find libs for plugin dep {}", id)))?;
+ search_path.append(&mut filter_dynamic_search_path(
+ output.library_paths.iter(),
+ root_output,
+ ));
+ }
+ let search_path = join_paths(&search_path, var)?;
+ rustc.env(var, &search_path);
+ Ok(())
+}
+
+// Determine paths to add to the dynamic search path from -L entries
+//
+// Strip off prefixes like "native=" or "framework=" and filter out directories
+// *not* inside our output directory since they are likely spurious and can cause
+// clashes with system shared libraries (issue #3366).
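+//
+// For example (illustrative): with a target root of `/p/target/debug`, an
+// entry `native=/p/target/debug/build/foo-abc123/out` is kept (as the bare
+// path), while `framework=/System/Library/Frameworks` is dropped.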
+fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec<PathBuf>
+where
+ I: Iterator<Item = &'a PathBuf>,
+{
+ let mut search_path = vec![];
+ for dir in paths {
+ let dir = match dir.to_str() {
+ Some(s) => {
+ let mut parts = s.splitn(2, '=');
+ match (parts.next(), parts.next()) {
+ (Some("native"), Some(path))
+ | (Some("crate"), Some(path))
+ | (Some("dependency"), Some(path))
+ | (Some("framework"), Some(path))
+ | (Some("all"), Some(path)) => path.into(),
+ _ => dir.clone(),
+ }
+ }
+ None => dir.clone(),
+ };
+ if dir.starts_with(&root_output) {
+ search_path.push(dir);
+ } else {
+ debug!(
+ "Not including path {} in runtime library search path because it is \
+ outside target root {}",
+ dir.display(),
+ root_output.display()
+ );
+ }
+ }
+ search_path
+}
+
+fn prepare_rustc<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ crate_types: &[&str],
+ unit: &Unit<'a>,
+) -> CargoResult<ProcessBuilder> {
+ let mut base = cx.compilation.rustc_process(unit.pkg)?;
+ base.inherit_jobserver(&cx.jobserver);
+ build_base_args(cx, &mut base, unit, crate_types)?;
+ build_deps_args(&mut base, cx, unit)?;
+ Ok(base)
+}
+
+fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Work> {
+ let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg)?;
+ rustdoc.inherit_jobserver(&cx.jobserver);
+ rustdoc.arg("--crate-name").arg(&unit.target.crate_name());
+ add_path_args(cx, unit, &mut rustdoc);
+
+ if unit.kind != Kind::Host {
+ if let Some(target) = cx.requested_target() {
+ rustdoc.arg("--target").arg(target);
+ }
+ }
+
+ let doc_dir = cx.files().out_dir(unit);
+
+ // Create the documentation directory ahead of time as rustdoc currently has
+ // a bug where concurrent invocations will race to create this directory if
+ // it doesn't already exist.
+ fs::create_dir_all(&doc_dir)?;
+
+ rustdoc.arg("-o").arg(doc_dir);
+
+ for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
+ rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
+ }
+
+ let manifest = unit.pkg.manifest();
+
+ if manifest.features().is_enabled(Feature::edition()) {
+ rustdoc.arg("-Zunstable-options");
+ rustdoc.arg(format!("--edition={}", &manifest.edition()));
+ }
+
+ if let Some(ref args) = unit.profile.rustdoc_args {
+ rustdoc.args(args);
+ }
+
+ build_deps_args(&mut rustdoc, cx, unit)?;
+
+ rustdoc.args(&cx.rustdocflags_args(unit)?);
+
+ let name = unit.pkg.name().to_string();
+ let build_state = cx.build_state.clone();
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+
+ Ok(Work::new(move |state| {
+ if let Some(output) = build_state.outputs.lock().unwrap().get(&key) {
+ for cfg in output.cfgs.iter() {
+ rustdoc.arg("--cfg").arg(cfg);
+ }
+ for &(ref name, ref value) in output.env.iter() {
+ rustdoc.env(name, value);
+ }
+ }
+ state.running(&rustdoc);
+ rustdoc
+ .exec()
+ .chain_err(|| format!("Could not document `{}`.", name))?;
+ Ok(())
+ }))
+}
+
+// The path that we pass to rustc is actually fairly important because it will
+// show up in error messages (important for readability), debug information
+// (important for caching), etc. As a result we need to be pretty careful how we
+// actually invoke rustc.
+//
+// In general users don't expect `cargo build` to cause rebuilds if you change
+// directories. That holds whether you just change directories within the
+// project or literally move the whole project wholesale to a new directory.
+// As a result we mostly don't factor in `cwd` to this calculation. Instead we
+// try to track the workspace as much as possible and we update the current
+// directory of rustc/rustdoc where appropriate.
+//
+// The first returned value here is the argument to pass to rustc, and the
+// second is the cwd that rustc should operate in.
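+//
+// For example (illustrative): for a workspace rooted at `/ws` with a member
+// source file at `/ws/member/src/lib.rs`, rustc is passed `member/src/lib.rs`
+// and run with a cwd of `/ws`; a source file outside the workspace keeps its
+// absolute path, with the package root as the cwd.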
+fn path_args(cx: &Context, unit: &Unit) -> (PathBuf, PathBuf) {
+ let ws_root = cx.ws.root();
+ let src = unit.target.src_path();
+ assert!(src.is_absolute());
+ match src.strip_prefix(ws_root) {
+ Ok(path) => (path.to_path_buf(), ws_root.to_path_buf()),
+ Err(_) => (src.to_path_buf(), unit.pkg.root().to_path_buf()),
+ }
+}
+
+fn add_path_args(cx: &Context, unit: &Unit, cmd: &mut ProcessBuilder) {
+ let (arg, cwd) = path_args(cx, unit);
+ cmd.arg(arg);
+ cmd.cwd(cwd);
+}
+
+fn build_base_args<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ cmd: &mut ProcessBuilder,
+ unit: &Unit<'a>,
+ crate_types: &[&str],
+) -> CargoResult<()> {
+ let Profile {
+ ref opt_level,
+ ref lto,
+ codegen_units,
+ ref rustc_args,
+ debuginfo,
+ debug_assertions,
+ overflow_checks,
+ rpath,
+ test,
+ doc: _doc,
+ run_custom_build,
+ ref panic,
+ check,
+ ..
+ } = *unit.profile;
+ assert!(!run_custom_build);
+
+ cmd.arg("--crate-name").arg(&unit.target.crate_name());
+
+ add_path_args(cx, unit, cmd);
+
+ match cx.config.shell().color_choice() {
+ ColorChoice::Always => {
+ cmd.arg("--color").arg("always");
+ }
+ ColorChoice::Never => {
+ cmd.arg("--color").arg("never");
+ }
+ ColorChoice::CargoAuto => {}
+ }
+
+ if cx.build_config.json_messages {
+ cmd.arg("--error-format").arg("json");
+ }
+
+ if !test {
+ for crate_type in crate_types.iter() {
+ cmd.arg("--crate-type").arg(crate_type);
+ }
+ }
+
+ if check {
+ cmd.arg("--emit=dep-info,metadata");
+ } else {
+ cmd.arg("--emit=dep-info,link");
+ }
+
+ let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build())
+ || (crate_types.contains(&"dylib") && cx.ws.members().any(|p| p != unit.pkg));
+ if prefer_dynamic {
+ cmd.arg("-C").arg("prefer-dynamic");
+ }
+
+ if opt_level != "0" {
+ cmd.arg("-C").arg(&format!("opt-level={}", opt_level));
+ }
+
+ // If a panic mode was configured *and* we're not ever going to be used in a
+ // plugin, then we can compile with that panic mode.
+ //
+ // If we're used in a plugin then we'll eventually be linked to libsyntax
+ // most likely which isn't compiled with a custom panic mode, so we'll just
+ // get an error if we actually compile with that. This fixes `panic=abort`
+ // crates which have plugin dependencies, but unfortunately means that
+ // dependencies shared between the main application and plugins must be
+ // compiled without `panic=abort`. This isn't so bad, though, as the main
+ // application will still be compiled with `panic=abort`.
+ if let Some(panic) = panic.as_ref() {
+ if !cx.used_in_plugin.contains(unit) {
+ cmd.arg("-C").arg(format!("panic={}", panic));
+ }
+ }
+ let manifest = unit.pkg.manifest();
+
+ if manifest.features().is_enabled(Feature::edition()) {
+ cmd.arg(format!("-Zedition={}", manifest.edition()));
+ }
+
+    // Disable LTO for host builds, as they are built with `prefer-dynamic`
+    // and the two options are mutually exclusive.
+ if unit.target.can_lto() && !unit.target.for_host() {
+ match *lto {
+ Lto::Bool(false) => {}
+ Lto::Bool(true) => {
+ cmd.args(&["-C", "lto"]);
+ }
+ Lto::Named(ref s) => {
+ cmd.arg("-C").arg(format!("lto={}", s));
+ }
+ }
+ }
+
+ if let Some(n) = codegen_units {
+ // There are some restrictions with LTO and codegen-units, so we
+ // only add codegen units when LTO is not used.
+ cmd.arg("-C").arg(&format!("codegen-units={}", n));
+ }
+
+ if let Some(debuginfo) = debuginfo {
+ cmd.arg("-C").arg(format!("debuginfo={}", debuginfo));
+ }
+
+ if let Some(ref args) = *rustc_args {
+ cmd.args(args);
+ }
+
+ // -C overflow-checks is implied by the setting of -C debug-assertions,
+ // so we only need to provide -C overflow-checks if it differs from
+ // the value of -C debug-assertions we would provide.
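+    //
+    // Worked example (illustrative): `opt-level=3` with `debug-assertions=on`
+    // and `overflow-checks=off` emits both `-C debug-assertions=on` and
+    // `-C overflow-checks=off`, while the default dev profile (`opt-level=0`,
+    // both checks on) emits neither flag.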
+ if opt_level != "0" {
+ if debug_assertions {
+ cmd.args(&["-C", "debug-assertions=on"]);
+ if !overflow_checks {
+ cmd.args(&["-C", "overflow-checks=off"]);
+ }
+ } else if overflow_checks {
+ cmd.args(&["-C", "overflow-checks=on"]);
+ }
+ } else if !debug_assertions {
+ cmd.args(&["-C", "debug-assertions=off"]);
+ if overflow_checks {
+ cmd.args(&["-C", "overflow-checks=on"]);
+ }
+ } else if !overflow_checks {
+ cmd.args(&["-C", "overflow-checks=off"]);
+ }
+
+ if test && unit.target.harness() {
+ cmd.arg("--test");
+ } else if test {
+ cmd.arg("--cfg").arg("test");
+ }
+
+ // We ideally want deterministic invocations of rustc to ensure that
+ // rustc-caching strategies like sccache are able to cache more, so sort the
+ // feature list here.
+ for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
+ cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
+ }
+
+ match cx.files().metadata(unit) {
+ Some(m) => {
+ cmd.arg("-C").arg(&format!("metadata={}", m));
+ cmd.arg("-C").arg(&format!("extra-filename=-{}", m));
+ }
+ None => {
+ cmd.arg("-C")
+ .arg(&format!("metadata={}", cx.files().target_short_hash(unit)));
+ }
+ }
+
+ if rpath {
+ cmd.arg("-C").arg("rpath");
+ }
+
+ cmd.arg("--out-dir").arg(&cx.files().out_dir(unit));
+
+ fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str, val: Option<&OsStr>) {
+ if let Some(val) = val {
+ let mut joined = OsString::from(prefix);
+ joined.push(val);
+ cmd.arg(key).arg(joined);
+ }
+ }
+
+ if unit.kind == Kind::Target {
+ opt(
+ cmd,
+ "--target",
+ "",
+ cx.requested_target().map(|s| s.as_ref()),
+ );
+ }
+
+ opt(cmd, "-C", "ar=", cx.ar(unit.kind).map(|s| s.as_ref()));
+ opt(
+ cmd,
+ "-C",
+ "linker=",
+ cx.linker(unit.kind).map(|s| s.as_ref()),
+ );
+ cmd.args(&cx.incremental_args(unit)?);
+
+ Ok(())
+}
+
+fn build_deps_args<'a, 'cfg>(
+ cmd: &mut ProcessBuilder,
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<()> {
+ cmd.arg("-L").arg(&{
+ let mut deps = OsString::from("dependency=");
+ deps.push(cx.files().deps_dir(unit));
+ deps
+ });
+
+ // Be sure that the host path is also listed. This'll ensure that proc-macro
+ // dependencies are correctly found (for reexported macros).
+ if let Kind::Target = unit.kind {
+ cmd.arg("-L").arg(&{
+ let mut deps = OsString::from("dependency=");
+ deps.push(cx.files().host_deps());
+ deps
+ });
+ }
+
+ let dep_targets = cx.dep_targets(unit);
+
+    // If there is no linkable target but there should be one, rustc fails
+    // later on if an `extern crate` references it. This may turn into a hard
+    // error in the future, see PR #4797.
+ if !dep_targets
+ .iter()
+ .any(|u| !u.profile.doc && u.target.linkable())
+ {
+ if let Some(u) = dep_targets
+ .iter()
+ .find(|u| !u.profile.doc && u.target.is_lib())
+ {
+ cx.config.shell().warn(format!(
+ "The package `{}` \
+ provides no linkable target. The compiler might raise an error while compiling \
+ `{}`. Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \
+ Cargo.toml. This warning might turn into a hard error in the future.",
+ u.target.crate_name(),
+ unit.target.crate_name(),
+ u.target.crate_name()
+ ))?;
+ }
+ }
+
+ for dep in dep_targets {
+ if dep.profile.run_custom_build {
+ cmd.env("OUT_DIR", &cx.files().build_script_out_dir(&dep));
+ }
+ if dep.target.linkable() && !dep.profile.doc {
+ link_to(cmd, cx, unit, &dep)?;
+ }
+ }
+
+ return Ok(());
+
+ fn link_to<'a, 'cfg>(
+ cmd: &mut ProcessBuilder,
+ cx: &mut Context<'a, 'cfg>,
+ current: &Unit<'a>,
+ dep: &Unit<'a>,
+ ) -> CargoResult<()> {
+ for output in cx.outputs(dep)?.iter() {
+ if output.flavor != FileFlavor::Linkable {
+ continue;
+ }
+ let mut v = OsString::new();
+
+ // Unfortunately right now Cargo doesn't have a great way to get a
+ // 1:1 mapping of entries in `dependencies()` to the actual crate
+ // we're depending on. Instead we're left to do some guesswork here
+ // to figure out what `Dependency` the `dep` unit corresponds to in
+ // `current` to see if we're renaming it.
+ //
+            // This, I believe, mostly works out for now, but we'll likely
+            // want to tighten this up in the future.
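+            //
+            // For example (illustrative): if the manifest renames dependency
+            // `bar` to `foo`, the name passed to `--extern` here is `foo`,
+            // while the file on disk is still `libbar-<hash>.rlib`.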
+ let name = current
+ .pkg
+ .dependencies()
+ .iter()
+ .filter(|d| d.matches_ignoring_source(dep.pkg.package_id()))
+ .filter_map(|d| d.rename())
+ .next();
+
+ v.push(name.unwrap_or(&dep.target.crate_name()));
+ v.push("=");
+ v.push(cx.files().out_dir(dep));
+ v.push(&path::MAIN_SEPARATOR.to_string());
+ v.push(&output.path.file_name().unwrap());
+ cmd.arg("--extern").arg(&v);
+ }
+ Ok(())
+ }
+}
+
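+/// Converts a package name to the form used in environment variables:
+/// uppercased, with `-` mapped to `_`; e.g. (illustrative)
+/// `envify("my-crate")` yields `"MY_CRATE"`.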
+fn envify(s: &str) -> String {
+ s.chars()
+ .flat_map(|c| c.to_uppercase())
+ .map(|c| if c == '-' { '_' } else { c })
+ .collect()
+}
+
+impl Kind {
+ fn for_target(&self, target: &Target) -> Kind {
+ // Once we start compiling for the `Host` kind we continue doing so, but
+ // if we are a `Target` kind and then we start compiling for a target
+ // that needs to be on the host we lift ourselves up to `Host`
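+        //
+        // For example, a build-script or proc-macro dependency of a `Target`
+        // unit has a `for_host` target, so it is compiled as `Host`.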
+ match *self {
+ Kind::Host => Kind::Host,
+ Kind::Target if target.for_host() => Kind::Host,
+ Kind::Target => Kind::Target,
+ }
+ }
+}
--- /dev/null
+use std::collections::{BTreeSet, HashSet};
+use std::io::{BufWriter, Write};
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use super::{fingerprint, Context, Unit};
+use util::{internal, CargoResult};
+use util::paths;
+
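+/// Renders a path for a Makefile-style dep-info file: relative to `basedir`
+/// when possible, with spaces escaped; e.g. (illustrative) `/my dir/lib.rs`
+/// becomes `/my\ dir/lib.rs`.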
+fn render_filename<P: AsRef<Path>>(path: P, basedir: Option<&str>) -> CargoResult<String> {
+ let path = path.as_ref();
+ let relpath = match basedir {
+ None => path,
+ Some(base) => match path.strip_prefix(base) {
+ Ok(relpath) => relpath,
+ _ => path,
+ },
+ };
+ relpath
+ .to_str()
+ .ok_or_else(|| internal("path not utf-8"))
+ .map(|f| f.replace(" ", "\\ "))
+}
+
+fn add_deps_for_unit<'a, 'b>(
+ deps: &mut BTreeSet<PathBuf>,
+ context: &mut Context<'a, 'b>,
+ unit: &Unit<'a>,
+ visited: &mut HashSet<Unit<'a>>,
+) -> CargoResult<()> {
+ if !visited.insert(*unit) {
+ return Ok(());
+ }
+
+ // units representing the execution of a build script don't actually
+ // generate a dep info file, so we just keep on going below
+ if !unit.profile.run_custom_build {
+ // Add dependencies from rustc dep-info output (stored in fingerprint directory)
+ let dep_info_loc = fingerprint::dep_info_loc(context, unit);
+ if let Some(paths) = fingerprint::parse_dep_info(unit.pkg, &dep_info_loc)? {
+ for path in paths {
+ deps.insert(path);
+ }
+ } else {
+ debug!(
+ "can't find dep_info for {:?} {:?}",
+ unit.pkg.package_id(),
+ unit.profile
+ );
+ return Err(internal("dep_info missing"));
+ }
+ }
+
+ // Add rerun-if-changed dependencies
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+ if let Some(output) = context.build_state.outputs.lock().unwrap().get(&key) {
+ for path in &output.rerun_if_changed {
+ deps.insert(path.into());
+ }
+ }
+
+ // Recursively traverse all transitive dependencies
+ for dep_unit in context.dep_targets(unit).iter() {
+ let source_id = dep_unit.pkg.package_id().source_id();
+ if source_id.is_path() {
+ add_deps_for_unit(deps, context, dep_unit, visited)?;
+ }
+ }
+ Ok(())
+}
+
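+/// Writes a Makefile-style `.d` dep-info file next to each hard-linked
+/// output of `unit`, e.g. (illustrative):
+///
+/// ```ignore
+/// target/debug/foo: src/main.rs src/lib.rs
+/// ```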
+pub fn output_depinfo<'a, 'b>(context: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
+ let mut deps = BTreeSet::new();
+ let mut visited = HashSet::new();
+ let success = add_deps_for_unit(&mut deps, context, unit, &mut visited).is_ok();
+ let basedir_string;
+ let basedir = match context.config.get_path("build.dep-info-basedir")? {
+ Some(value) => {
+ basedir_string = value
+ .val
+ .as_os_str()
+ .to_str()
+ .ok_or_else(|| internal("build.dep-info-basedir path not utf-8"))?
+ .to_string();
+ Some(basedir_string.as_str())
+ }
+ None => None,
+ };
+ let deps = deps.iter()
+ .map(|f| render_filename(f, basedir))
+ .collect::<CargoResult<Vec<_>>>()?;
+
+ for output in context.outputs(unit)?.iter() {
+ if let Some(ref link_dst) = output.hardlink {
+ let output_path = link_dst.with_extension("d");
+ if success {
+ let target_fn = render_filename(link_dst, basedir)?;
+
+                // If nothing changed, don't recreate the file, which could
+                // alter its mtime.
+ if let Ok(previous) = fingerprint::parse_rustc_dep_info(&output_path) {
+ if previous.len() == 1 && previous[0].0 == target_fn && previous[0].1 == deps {
+ continue;
+ }
+ }
+
+ // Otherwise write it all out
+ let mut outfile = BufWriter::new(File::create(output_path)?);
+ write!(outfile, "{}:", target_fn)?;
+ for dep in &deps {
+ write!(outfile, " {}", dep)?;
+ }
+                writeln!(outfile)?;
+
+ // dep-info generation failed, so delete output file. This will
+ // usually cause the build system to always rerun the build
+ // rule, which is correct if inefficient.
+ } else if output_path.exists() {
+ paths::remove_file(output_path)?;
+ }
+ }
+ }
+ Ok(())
+}
pub mod summary;
pub mod shell;
pub mod registry;
+pub mod compiler;
mod interning;
mod package_id_spec;
mod workspace;
use std::path::Path;
use core::{Profiles, Workspace};
+use core::compiler::{BuildConfig, Context, Kind, Unit};
use util::Config;
use util::errors::{CargoResult, CargoResultExt};
use util::paths;
-use ops::{self, BuildConfig, Context, Kind, Unit};
+use ops;
pub struct CleanOptions<'a> {
pub config: &'a Config,
use core::{Package, Source, Target};
use core::{PackageId, PackageIdSpec, Profile, Profiles, TargetKind, Workspace};
+use core::compiler::{BuildConfig, BuildOutput, Compilation, Context, DefaultExecutor, Executor};
+use core::compiler::{Kind, TargetConfig, Unit};
use core::resolver::{Method, Resolve};
-use ops::{self, BuildOutput, Context, DefaultExecutor, Executor, Kind, Unit};
+use ops;
use util::config::Config;
use util::{profile, CargoResult, CargoResultExt};
pub fn compile<'a>(
ws: &Workspace<'a>,
options: &CompileOptions<'a>,
-) -> CargoResult<ops::Compilation<'a>> {
+) -> CargoResult<Compilation<'a>> {
compile_with_exec(ws, options, Arc::new(DefaultExecutor))
}
ws: &Workspace<'a>,
options: &CompileOptions<'a>,
exec: Arc<Executor>,
-) -> CargoResult<ops::Compilation<'a>> {
+) -> CargoResult<Compilation<'a>> {
for member in ws.members() {
for warning in member.manifest().warnings().iter() {
if warning.is_critical {
source: Option<Box<Source + 'a>>,
options: &CompileOptions<'a>,
exec: Arc<Executor>,
-) -> CargoResult<ops::Compilation<'a>> {
+) -> CargoResult<Compilation<'a>> {
let CompileOptions {
config,
jobs,
config: &Config,
jobs: Option<u32>,
target: Option<String>,
-) -> CargoResult<ops::BuildConfig> {
+) -> CargoResult<BuildConfig> {
if jobs.is_some() && config.jobserver_from_env().is_some() {
config.shell().warn(
"a `-j` argument was passed to Cargo but Cargo is \
let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
let cfg_target = config.get_string("build.target")?.map(|s| s.val);
let target = target.or(cfg_target);
- let mut base = ops::BuildConfig::new(&config.rustc()?.host, &target)?;
+ let mut base = BuildConfig::new(&config.rustc()?.host, &target)?;
base.jobs = jobs;
base.host = scrape_target_config(config, &base.host_triple)?;
base.target = match target.as_ref() {
Ok(base)
}
-fn scrape_target_config(config: &Config, triple: &str) -> CargoResult<ops::TargetConfig> {
+fn scrape_target_config(config: &Config, triple: &str) -> CargoResult<TargetConfig> {
let key = format!("target.{}", triple);
- let mut ret = ops::TargetConfig {
+ let mut ret = TargetConfig {
ar: config.get_path(&format!("{}.ar", key))?.map(|v| v.val),
linker: config.get_path(&format!("{}.linker", key))?.map(|v| v.val),
overrides: HashMap::new(),
use core::{Dependency, Edition, Package, PackageIdSpec, Source, SourceId};
use core::{PackageId, Workspace};
-use ops::{self, CompileFilter, DefaultExecutor};
+use core::compiler::DefaultExecutor;
+use ops::{self, CompileFilter};
use sources::{GitSource, PathSource, SourceConfigMap};
use util::{internal, Config};
use util::{FileLock, Filesystem};
use git2::Config as GitConfig;
use git2::Repository as GitRepository;
-use core::Workspace;
-use ops::is_bad_artifact_name;
+use core::{compiler, Workspace};
use util::{internal, FossilRepo, GitRepo, HgRepo, PijulRepo};
use util::{paths, Config};
use util::errors::{CargoResult, CargoResultExt};
"pure", "ref", "return", "self", "sizeof", "static", "struct", "super", "test", "trait",
"true", "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield",
];
- if blacklist.contains(&name) || (opts.kind.is_bin() && is_bad_artifact_name(name)) {
+ if blacklist.contains(&name) || (opts.kind.is_bin() && compiler::is_bad_artifact_name(name)) {
bail!(
"The name `{}` cannot be used as a crate name{}",
name,
use tar::{Archive, Builder, EntryType, Header};
use core::{Package, Source, SourceId, Workspace};
+use core::compiler::DefaultExecutor;
use sources::PathSource;
use util::{self, internal, Config, FileLock};
use util::paths;
use util::errors::{CargoResult, CargoResultExt};
-use ops::{self, DefaultExecutor};
+use ops;
pub struct PackageOpts<'cfg> {
pub config: &'cfg Config,
+++ /dev/null
-use std::collections::{BTreeSet, HashMap, HashSet};
-use std::ffi::OsStr;
-use std::path::PathBuf;
-
-use semver::Version;
-use lazycell::LazyCell;
-
-use core::{Package, PackageId, Target, TargetKind};
-use util::{self, join_paths, process, CargoResult, Config, ProcessBuilder};
-
-/// A structure returning the result of a compilation.
-pub struct Compilation<'cfg> {
- /// A mapping from a package to the list of libraries that need to be
- /// linked when working with that package.
- pub libraries: HashMap<PackageId, HashSet<(Target, PathBuf)>>,
-
- /// An array of all tests created during this compilation.
- pub tests: Vec<(Package, TargetKind, String, PathBuf)>,
-
- /// An array of all binaries created.
- pub binaries: Vec<PathBuf>,
-
- /// All directories for the output of native build commands.
- ///
- /// This is currently used to drive some entries which are added to the
- /// LD_LIBRARY_PATH as appropriate.
- ///
- /// The order should be deterministic.
- // TODO: deprecated, remove
- pub native_dirs: BTreeSet<PathBuf>,
-
- /// Root output directory (for the local package's artifacts)
- pub root_output: PathBuf,
-
- /// Output directory for rust dependencies.
- /// May be for the host or for a specific target.
- pub deps_output: PathBuf,
-
- /// Output directory for the rust host dependencies.
- pub host_deps_output: PathBuf,
-
- /// The path to rustc's own libstd
- pub host_dylib_path: Option<PathBuf>,
-
- /// The path to libstd for the target
- pub target_dylib_path: Option<PathBuf>,
-
- /// Extra environment variables that were passed to compilations and should
- /// be passed to future invocations of programs.
- pub extra_env: HashMap<PackageId, Vec<(String, String)>>,
-
- pub to_doc_test: Vec<Package>,
-
- /// Features per package enabled during this compilation.
- pub cfgs: HashMap<PackageId, HashSet<String>>,
-
- /// Flags to pass to rustdoc when invoked from cargo test, per package.
- pub rustdocflags: HashMap<PackageId, Vec<String>>,
-
- pub target: String,
-
- config: &'cfg Config,
-
- target_runner: LazyCell<Option<(PathBuf, Vec<String>)>>,
-}
-
-impl<'cfg> Compilation<'cfg> {
- pub fn new(config: &'cfg Config) -> Compilation<'cfg> {
- Compilation {
- libraries: HashMap::new(),
- native_dirs: BTreeSet::new(), // TODO: deprecated, remove
- root_output: PathBuf::from("/"),
- deps_output: PathBuf::from("/"),
- host_deps_output: PathBuf::from("/"),
- host_dylib_path: None,
- target_dylib_path: None,
- tests: Vec::new(),
- binaries: Vec::new(),
- extra_env: HashMap::new(),
- to_doc_test: Vec::new(),
- cfgs: HashMap::new(),
- rustdocflags: HashMap::new(),
- config,
- target: String::new(),
- target_runner: LazyCell::new(),
- }
- }
-
- /// See `process`.
- pub fn rustc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
- self.fill_env(self.config.rustc()?.process(), pkg, true)
- }
-
- /// See `process`.
- pub fn rustdoc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
- self.fill_env(process(&*self.config.rustdoc()?), pkg, false)
- }
-
- /// See `process`.
- pub fn host_process<T: AsRef<OsStr>>(
- &self,
- cmd: T,
- pkg: &Package,
- ) -> CargoResult<ProcessBuilder> {
- self.fill_env(process(cmd), pkg, true)
- }
-
- fn target_runner(&self) -> CargoResult<&Option<(PathBuf, Vec<String>)>> {
- self.target_runner.try_borrow_with(|| {
- let key = format!("target.{}.runner", self.target);
- Ok(self.config.get_path_and_args(&key)?.map(|v| v.val))
- })
- }
-
- /// See `process`.
- pub fn target_process<T: AsRef<OsStr>>(
- &self,
- cmd: T,
- pkg: &Package,
- ) -> CargoResult<ProcessBuilder> {
- let builder = if let Some((ref runner, ref args)) = *self.target_runner()? {
- let mut builder = process(runner);
- builder.args(args);
- builder.arg(cmd);
- builder
- } else {
- process(cmd)
- };
- self.fill_env(builder, pkg, false)
- }
-
- /// Prepares a new process with an appropriate environment to run against
- /// the artifacts produced by the build process.
- ///
- /// The package argument is also used to configure environment variables as
- /// well as the working directory of the child process.
- fn fill_env(
- &self,
- mut cmd: ProcessBuilder,
- pkg: &Package,
- is_host: bool,
- ) -> CargoResult<ProcessBuilder> {
- let mut search_path = if is_host {
- let mut search_path = vec![self.host_deps_output.clone()];
- search_path.extend(self.host_dylib_path.clone());
- search_path
- } else {
- let mut search_path =
- super::filter_dynamic_search_path(self.native_dirs.iter(), &self.root_output);
- search_path.push(self.root_output.clone());
- search_path.push(self.deps_output.clone());
- search_path.extend(self.target_dylib_path.clone());
- search_path
- };
-
- search_path.extend(util::dylib_path().into_iter());
- let search_path = join_paths(&search_path, util::dylib_path_envvar())?;
-
- cmd.env(util::dylib_path_envvar(), &search_path);
- if let Some(env) = self.extra_env.get(pkg.package_id()) {
- for &(ref k, ref v) in env {
- cmd.env(k, v);
- }
- }
-
- let metadata = pkg.manifest().metadata();
-
- let cargo_exe = self.config.cargo_exe()?;
- cmd.env(::CARGO_ENV, cargo_exe);
-
- // When adding new environment variables depending on
- // crate properties which might require rebuild upon change
- // consider adding the corresponding properties to the hash
- // in Context::target_metadata()
- cmd.env("CARGO_MANIFEST_DIR", pkg.root())
- .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string())
- .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string())
- .env("CARGO_PKG_VERSION_PATCH", &pkg.version().patch.to_string())
- .env(
- "CARGO_PKG_VERSION_PRE",
- &pre_version_component(pkg.version()),
- )
- .env("CARGO_PKG_VERSION", &pkg.version().to_string())
- .env("CARGO_PKG_NAME", &*pkg.name())
- .env(
- "CARGO_PKG_DESCRIPTION",
- metadata.description.as_ref().unwrap_or(&String::new()),
- )
- .env(
- "CARGO_PKG_HOMEPAGE",
- metadata.homepage.as_ref().unwrap_or(&String::new()),
- )
- .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":"))
- .cwd(pkg.root());
- Ok(cmd)
- }
-}
-
-fn pre_version_component(v: &Version) -> String {
- if v.pre.is_empty() {
- return String::new();
- }
-
- let mut ret = String::new();
-
- for (i, x) in v.pre.iter().enumerate() {
- if i != 0 {
- ret.push('.')
- };
- ret.push_str(&x.to_string());
- }
-
- ret
-}
+++ /dev/null
-use std::collections::HashMap;
-use std::env;
-use std::fmt;
-use std::hash::{Hash, Hasher, SipHasher};
-use std::path::{Path, PathBuf};
-use std::sync::Arc;
-
-use lazycell::LazyCell;
-
-use core::{TargetKind, Workspace};
-use super::{Context, FileFlavor, Kind, Layout, Unit};
-use util::{self, CargoResult};
-
-#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
-pub struct Metadata(u64);
-
-impl fmt::Display for Metadata {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "{:016x}", self.0)
- }
-}
-
-pub struct CompilationFiles<'a, 'cfg: 'a> {
- /// The target directory layout for the host (and target if it is the same as host)
- pub(super) host: Layout,
- /// The target directory layout for the target (if different from then host)
- pub(super) target: Option<Layout>,
- export_dir: Option<(PathBuf, Vec<Unit<'a>>)>,
- ws: &'a Workspace<'cfg>,
- metas: HashMap<Unit<'a>, Option<Metadata>>,
- /// For each Unit, a list all files produced.
- outputs: HashMap<Unit<'a>, LazyCell<Arc<Vec<OutputFile>>>>,
-}
-
-#[derive(Debug)]
-pub struct OutputFile {
- /// File name that will be produced by the build process (in `deps`).
- pub path: PathBuf,
- /// If it should be linked into `target`, and what it should be called
- /// (e.g. without metadata).
- pub hardlink: Option<PathBuf>,
- /// Type of the file (library / debug symbol / else).
- pub flavor: FileFlavor,
-}
-
-impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> {
- pub(super) fn new(
- roots: &[Unit<'a>],
- host: Layout,
- target: Option<Layout>,
- export_dir: Option<PathBuf>,
- ws: &'a Workspace<'cfg>,
- cx: &Context<'a, 'cfg>,
- ) -> CompilationFiles<'a, 'cfg> {
- let mut metas = HashMap::new();
- for unit in roots {
- metadata_of(unit, cx, &mut metas);
- }
- let outputs = metas
- .keys()
- .cloned()
- .map(|unit| (unit, LazyCell::new()))
- .collect();
- CompilationFiles {
- ws,
- host,
- target,
- export_dir: export_dir.map(|dir| (dir, roots.to_vec())),
- metas,
- outputs,
- }
- }
-
- /// Returns the appropriate directory layout for either a plugin or not.
- pub fn layout(&self, kind: Kind) -> &Layout {
- match kind {
- Kind::Host => &self.host,
- Kind::Target => self.target.as_ref().unwrap_or(&self.host),
- }
- }
-
- /// Get the metadata for a target in a specific profile.
- /// We build to the path "{filename}-{target_metadata}", then
- /// use a linking step to link/copy to a predictable filename
- /// like `target/debug/libfoo.{a,so,rlib}` and such.
- pub fn metadata(&self, unit: &Unit<'a>) -> Option<Metadata> {
- self.metas[unit].clone()
- }
-
- /// Get the short hash based only on the PackageId
- /// Used for the metadata when target_metadata returns None
- pub fn target_short_hash(&self, unit: &Unit) -> String {
- let hashable = unit.pkg.package_id().stable_hash(self.ws.root());
- util::short_hash(&hashable)
- }
-
- /// Returns the appropriate output directory for the specified package and
- /// target.
- pub fn out_dir(&self, unit: &Unit<'a>) -> PathBuf {
- if unit.profile.doc {
- self.layout(unit.kind).root().parent().unwrap().join("doc")
- } else if unit.target.is_custom_build() {
- self.build_script_dir(unit)
- } else if unit.target.is_example() {
- self.layout(unit.kind).examples().to_path_buf()
- } else {
- self.deps_dir(unit).to_path_buf()
- }
- }
-
- pub fn export_dir(&self, unit: &Unit<'a>) -> Option<PathBuf> {
- let &(ref dir, ref roots) = self.export_dir.as_ref()?;
- if roots.contains(unit) {
- Some(dir.clone())
- } else {
- None
- }
- }
-
- pub fn pkg_dir(&self, unit: &Unit<'a>) -> String {
- let name = unit.pkg.package_id().name();
- match self.metas[unit] {
- Some(ref meta) => format!("{}-{}", name, meta),
- None => format!("{}-{}", name, self.target_short_hash(unit)),
- }
- }
-
- /// Return the root of the build output tree
- pub fn target_root(&self) -> &Path {
- self.host.dest()
- }
-
- pub fn host_deps(&self) -> &Path {
- self.host.deps()
- }
-
- /// Returns the directories where Rust crate dependencies are found for the
- /// specified unit.
- pub fn deps_dir(&self, unit: &Unit) -> &Path {
- self.layout(unit.kind).deps()
- }
-
- pub fn fingerprint_dir(&self, unit: &Unit<'a>) -> PathBuf {
- let dir = self.pkg_dir(unit);
- self.layout(unit.kind).fingerprint().join(dir)
- }
-
- /// Returns the directory where the compiled build script is placed.
- pub fn build_script_dir(&self, unit: &Unit<'a>) -> PathBuf {
- assert!(unit.target.is_custom_build());
- assert!(!unit.profile.run_custom_build);
- let dir = self.pkg_dir(unit);
- self.layout(Kind::Host).build().join(dir)
- }
-
- /// Returns the `OUT_DIR` directory where a build script writes its output.
- pub fn build_script_out_dir(&self, unit: &Unit<'a>) -> PathBuf {
- assert!(unit.target.is_custom_build());
- assert!(unit.profile.run_custom_build);
- let dir = self.pkg_dir(unit);
- self.layout(unit.kind).build().join(dir).join("out")
- }
-
- /// Returns the file stem for a given target/profile combo (with metadata)
- pub fn file_stem(&self, unit: &Unit<'a>) -> String {
- match self.metas[unit] {
- Some(ref metadata) => format!("{}-{}", unit.target.crate_name(), metadata),
- None => self.bin_stem(unit),
- }
- }
-
- pub(super) fn outputs(
- &self,
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- ) -> CargoResult<Arc<Vec<OutputFile>>> {
- self.outputs[unit]
- .try_borrow_with(|| self.calc_outputs(unit, cx))
- .map(Arc::clone)
- }
-
- /// Returns the bin stem for a given target (without metadata)
- fn bin_stem(&self, unit: &Unit) -> String {
- if unit.target.allows_underscores() {
- unit.target.name().to_string()
- } else {
- unit.target.crate_name()
- }
- }
-
- /// Returns a tuple with the directory and name of the hard link we expect
- /// our target to be copied to. E.g., `file_stem` may be
- /// `out_dir/deps/foo-abcdef` and `link_stem` would be `out_dir/foo`.
- /// This function returns it in two parts so the caller can add a
- /// prefix/suffix to the filename separately.
- ///
- /// Returns an `Option` because in some cases we don't want to link
- /// (e.g. a dependent lib).
- fn link_stem(&self, unit: &Unit<'a>) -> Option<(PathBuf, String)> {
- let out_dir = self.out_dir(unit);
- let bin_stem = self.bin_stem(unit);
- let file_stem = self.file_stem(unit);
-
- // We currently only lift files up from the `deps` directory. If
- // it was compiled into something like `example/` or `doc/` then
- // we don't want to link it up.
- if out_dir.ends_with("deps") {
- // Don't lift up library dependencies
- if self.ws.members().find(|&p| p == unit.pkg).is_none() && !unit.target.is_bin() {
- None
- } else {
- Some((
- out_dir.parent().unwrap().to_owned(),
- if unit.profile.test {
- file_stem
- } else {
- bin_stem
- },
- ))
- }
- } else if bin_stem == file_stem {
- None
- } else if out_dir.ends_with("examples") || out_dir.parent().unwrap().ends_with("build") {
- Some((out_dir, bin_stem))
- } else {
- None
- }
- }
-
- fn calc_outputs(
- &self,
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- ) -> CargoResult<Arc<Vec<OutputFile>>> {
- let out_dir = self.out_dir(unit);
- let file_stem = self.file_stem(unit);
- let link_stem = self.link_stem(unit);
- let info = if unit.target.for_host() {
- &cx.host_info
- } else {
- &cx.target_info
- };
-
- let mut ret = Vec::new();
- let mut unsupported = Vec::new();
- {
- if unit.profile.check {
- let path = out_dir.join(format!("lib{}.rmeta", file_stem));
- let hardlink = link_stem
- .clone()
- .map(|(ld, ls)| ld.join(format!("lib{}.rmeta", ls)));
- ret.push(OutputFile {
- path,
- hardlink,
- flavor: FileFlavor::Linkable,
- });
- } else {
- let mut add = |crate_type: &str, flavor: FileFlavor| -> CargoResult<()> {
- let crate_type = if crate_type == "lib" {
- "rlib"
- } else {
- crate_type
- };
- let file_types = info.file_types(
- crate_type,
- flavor,
- unit.target.kind(),
- cx.target_triple(),
- )?;
-
- match file_types {
- Some(types) => for file_type in types {
- let path = out_dir.join(file_type.filename(&file_stem));
- let hardlink = link_stem
- .as_ref()
- .map(|&(ref ld, ref ls)| ld.join(file_type.filename(ls)));
- ret.push(OutputFile {
- path,
- hardlink,
- flavor: file_type.flavor,
- });
- },
- // not supported, don't worry about it
- None => {
- unsupported.push(crate_type.to_string());
- }
- }
- Ok(())
- };
- //info!("{:?}", unit);
- match *unit.target.kind() {
- TargetKind::Bin
- | TargetKind::CustomBuild
- | TargetKind::ExampleBin
- | TargetKind::Bench
- | TargetKind::Test => {
- add("bin", FileFlavor::Normal)?;
- }
- TargetKind::Lib(..) | TargetKind::ExampleLib(..) if unit.profile.test => {
- add("bin", FileFlavor::Normal)?;
- }
- TargetKind::ExampleLib(ref kinds) | TargetKind::Lib(ref kinds) => {
- for kind in kinds {
- add(
- kind.crate_type(),
- if kind.linkable() {
- FileFlavor::Linkable
- } else {
- FileFlavor::Normal
- },
- )?;
- }
- }
- }
- }
- }
- if ret.is_empty() {
- if !unsupported.is_empty() {
- bail!(
- "cannot produce {} for `{}` as the target `{}` \
- does not support these crate types",
- unsupported.join(", "),
- unit.pkg,
- cx.target_triple()
- )
- }
- bail!(
- "cannot compile `{}` as the target `{}` does not \
- support any of the output crate types",
- unit.pkg,
- cx.target_triple()
- );
- }
- info!("Target filenames: {:?}", ret);
-
- Ok(Arc::new(ret))
- }
-}
-
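-/// Computes (and memoizes in `metas`) the metadata hash for `unit` and,
-/// recursively, for all of its dependency units.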
-fn metadata_of<'a, 'cfg>(
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- metas: &mut HashMap<Unit<'a>, Option<Metadata>>,
-) -> Option<Metadata> {
- if !metas.contains_key(unit) {
- let meta = compute_metadata(unit, cx, metas);
- metas.insert(*unit, meta);
- for unit in cx.dep_targets(unit) {
- metadata_of(&unit, cx, metas);
- }
- }
- metas[unit].clone()
-}
-
-fn compute_metadata<'a, 'cfg>(
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- metas: &mut HashMap<Unit<'a>, Option<Metadata>>,
-) -> Option<Metadata> {
- // No metadata for dylibs because of a couple of issues:
- // - OSX encodes the dylib name in the executable,
- // - on Windows rustc emits multiple files and we can't easily link them all.
- //
- // No metadata for bins because of an issue:
- // - wasm32 rustc/emcc encodes the .wasm name in the .js (rust-lang/cargo#4535)
- //
- // Two exceptions:
- // 1) Upstream dependencies (we aren't exporting them and need to resolve
- //    name conflicts)
- // 2) the __CARGO_DEFAULT_LIB_METADATA env var
- //
- // Note, though, that the compiler's build system at least wants
- // path dependencies (e.g. libstd) to have hashes in filenames. To account
- // for that we have an extra hack here which reads the
- // `__CARGO_DEFAULT_LIB_METADATA` environment variable and creates a
- // hash in the filename if that's present.
- //
- // This environment variable should not be relied on! It's
- // just here for rustbuild. We need a more principled method of
- // doing this eventually.
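- //
- // In sketch form: the metadata hash is omitted only for path-source
- // dylibs, cdylibs, and wasm32 bins outside of test/check builds, and
- // only when `__CARGO_DEFAULT_LIB_METADATA` is unset.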
- let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA");
- if !(unit.profile.test || unit.profile.check)
- && (unit.target.is_dylib() || unit.target.is_cdylib()
- || (unit.target.is_bin() && cx.target_triple().starts_with("wasm32-")))
- && unit.pkg.package_id().source_id().is_path()
- && __cargo_default_lib_metadata.is_err()
- {
- return None;
- }
-
- let mut hasher = SipHasher::new_with_keys(0, 0);
-
- // Unique metadata per (name, source, version) triple. This'll allow us
- // to pull crates from anywhere without worrying about conflicts.
- unit.pkg
- .package_id()
- .stable_hash(cx.ws.root())
- .hash(&mut hasher);
-
- // Add package properties which map to environment variables
- // exposed by Cargo
- let manifest_metadata = unit.pkg.manifest().metadata();
- manifest_metadata.authors.hash(&mut hasher);
- manifest_metadata.description.hash(&mut hasher);
- manifest_metadata.homepage.hash(&mut hasher);
-
- // Also mix in enabled features to our metadata. This'll ensure that
- // when changing feature sets each lib is separately cached.
- cx.resolve
- .features_sorted(unit.pkg.package_id())
- .hash(&mut hasher);
-
- // Mix in the target-metadata of all the dependencies of this target
- {
- let mut deps_metadata = cx.dep_targets(unit)
- .iter()
- .map(|dep| metadata_of(dep, cx, metas))
- .collect::<Vec<_>>();
- deps_metadata.sort();
- deps_metadata.hash(&mut hasher);
- }
-
- // Throw in the profile we're compiling with. This helps caching
- // panic=abort and panic=unwind artifacts, additionally with various
- // settings like debuginfo and whatnot.
- unit.profile.hash(&mut hasher);
-
- // Artifacts compiled for the host should have a different metadata
- // piece than those compiled for the target, so make sure we throw in
- // the unit's `kind` as well
- unit.kind.hash(&mut hasher);
-
- // Finally throw in the target name/kind. This ensures that concurrent
- // compiles of targets in the same crate don't collide.
- unit.target.name().hash(&mut hasher);
- unit.target.kind().hash(&mut hasher);
-
- if let Ok(rustc) = cx.config.rustc() {
- rustc.verbose_version.hash(&mut hasher);
- }
-
- // Seed the contents of __CARGO_DEFAULT_LIB_METADATA to the hasher if present.
- // This should be the release channel, to get a different hash for each channel.
- if let Ok(ref channel) = __cargo_default_lib_metadata {
- channel.hash(&mut hasher);
- }
- Some(Metadata(hasher.finish()))
-}
+++ /dev/null
-#![allow(deprecated)]
-
-use std::collections::{HashMap, HashSet};
-use std::env;
-use std::path::{Path, PathBuf};
-use std::str::{self, FromStr};
-use std::sync::Arc;
-
-use jobserver::Client;
-
-use core::{Package, PackageId, PackageSet, Profile, Resolve, Target};
-use core::{Dependency, Profiles, Workspace};
-use util::{internal, profile, Cfg, CfgExpr, Config};
-use util::errors::{CargoResult, CargoResultExt};
-
-use super::TargetConfig;
-use super::custom_build::{self, BuildDeps, BuildScripts, BuildState};
-use super::fingerprint::Fingerprint;
-use super::job_queue::JobQueue;
-use super::layout::Layout;
-use super::links::Links;
-use super::{BuildConfig, Compilation, Executor, Kind};
-
-mod unit_dependencies;
-use self::unit_dependencies::build_unit_dependencies;
-
-mod compilation_files;
-use self::compilation_files::{CompilationFiles, OutputFile};
-pub use self::compilation_files::Metadata;
-
-mod target_info;
-pub use self::target_info::FileFlavor;
-use self::target_info::TargetInfo;
-
-/// All information needed to define a Unit.
-///
-/// A unit is an object that has enough information so that cargo knows how to build it.
-/// For example, if your project has dependencies, then every dependency will be built as a library
-/// unit. If your project is a library, then it will be built as a library unit as well, or if it
-/// is a binary with `main.rs`, then a binary will be output. There are also separate unit types
-/// for `test`ing and `check`ing, amongst others.
-///
-/// The unit also holds information about all possible metadata about the package in `pkg`.
-///
-/// A unit needs to know extra information in addition to the type and root source file. For
-/// example, it needs to know the target architecture (OS, chip arch etc.) and it needs to know
-/// whether you want a debug or release build. There is enough information in this struct to figure
-/// all that out.
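-///
-/// A hedged illustration: running `cargo test` for a package with a build
-/// script yields, among others, a unit for the library itself
-/// (`Kind::Target`) and units for compiling and running the build script
-/// (`Kind::Host`, since build scripts always run on the host).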
-#[derive(Clone, Copy, Eq, PartialEq, Hash)]
-pub struct Unit<'a> {
- /// Information about available targets, which files to include/exclude, etc. Basically stuff in
- /// `Cargo.toml`.
- pub pkg: &'a Package,
- /// Information about the specific target to build, out of the possible targets in `pkg`. Not
- /// to be confused with *target-triple* (or *target architecture* ...), the target arch for a
- /// build.
- pub target: &'a Target,
- /// The profile contains information about *how* the build should be run, including debug
- /// level, extra args to pass to rustc, etc.
- pub profile: &'a Profile,
- /// Whether this compilation unit is for the host or target architecture.
- ///
- /// For example, when cross compiling and using a custom build script, the
- /// build script needs to be compiled for the host architecture so the host
- /// rustc can use it (when compiling to the target architecture).
- pub kind: Kind,
-}
-
-/// The build context, containing all information about a build task
-pub struct Context<'a, 'cfg: 'a> {
- /// The workspace the build is for
- pub ws: &'a Workspace<'cfg>,
- /// The cargo configuration
- pub config: &'cfg Config,
- /// The dependency graph for our build
- pub resolve: &'a Resolve,
- /// Information on the compilation output
- pub compilation: Compilation<'cfg>,
- pub packages: &'a PackageSet<'cfg>,
- pub build_state: Arc<BuildState>,
- pub build_script_overridden: HashSet<(PackageId, Kind)>,
- pub build_explicit_deps: HashMap<Unit<'a>, BuildDeps>,
- pub fingerprints: HashMap<Unit<'a>, Arc<Fingerprint>>,
- pub compiled: HashSet<Unit<'a>>,
- pub build_config: BuildConfig,
- pub build_scripts: HashMap<Unit<'a>, Arc<BuildScripts>>,
- pub links: Links<'a>,
- pub used_in_plugin: HashSet<Unit<'a>>,
- pub jobserver: Client,
-
- target_info: TargetInfo,
- host_info: TargetInfo,
- profiles: &'a Profiles,
- incremental_env: Option<bool>,
-
- unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
- files: Option<CompilationFiles<'a, 'cfg>>,
-}
-
-impl<'a, 'cfg> Context<'a, 'cfg> {
- pub fn new(
- ws: &'a Workspace<'cfg>,
- resolve: &'a Resolve,
- packages: &'a PackageSet<'cfg>,
- config: &'cfg Config,
- build_config: BuildConfig,
- profiles: &'a Profiles,
- ) -> CargoResult<Context<'a, 'cfg>> {
- let incremental_env = match env::var("CARGO_INCREMENTAL") {
- Ok(v) => Some(v == "1"),
- Err(_) => None,
- };
-
- // Load up the jobserver that we'll use to manage our parallelism. This
- // is the same as the GNU make implementation of a jobserver, and
- // intentionally so! It's hoped that we can interact with GNU make and
- // all share the same jobserver.
- //
- // Note that if we don't have a jobserver in our environment then we
- // create our own, and we create it with `n-1` tokens because one token
- // is ourself, a running process.
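- //
- // E.g. (a sketch): with `-j8` and no inherited jobserver we create a
- // client with 7 tokens; our own process implicitly holds the 8th.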
- let jobserver = match config.jobserver_from_env() {
- Some(c) => c.clone(),
- None => Client::new(build_config.jobs as usize - 1)
- .chain_err(|| "failed to create jobserver")?,
- };
- let mut cx = Context {
- ws,
- resolve,
- packages,
- config,
- target_info: TargetInfo::default(),
- host_info: TargetInfo::default(),
- compilation: Compilation::new(config),
- build_state: Arc::new(BuildState::new(&build_config)),
- build_config,
- fingerprints: HashMap::new(),
- profiles,
- compiled: HashSet::new(),
- build_scripts: HashMap::new(),
- build_explicit_deps: HashMap::new(),
- links: Links::new(),
- used_in_plugin: HashSet::new(),
- incremental_env,
- jobserver,
- build_script_overridden: HashSet::new(),
-
- unit_dependencies: HashMap::new(),
- files: None,
- };
-
- cx.probe_target_info()?;
- Ok(cx)
- }
-
- // Returns a mapping of the root package plus its immediate dependencies to
- // where the compiled libraries are all located.
- pub fn compile(
- mut self,
- units: &[Unit<'a>],
- export_dir: Option<PathBuf>,
- exec: &Arc<Executor>,
- ) -> CargoResult<Compilation<'cfg>> {
- let mut queue = JobQueue::new(&self);
- self.prepare_units(export_dir, units)?;
- self.prepare()?;
- self.build_used_in_plugin_map(&units)?;
- custom_build::build_map(&mut self, &units)?;
-
- for unit in units.iter() {
- // Build up a list of pending jobs, each of which represents
- // compiling a particular package. No actual work is executed as
- // part of this, that's all done next as part of the `execute`
- // function which will run everything in order with proper
- // parallelism.
- super::compile(&mut self, &mut queue, unit, exec)?;
- }
-
- // Now that we've figured out everything that we're going to do, do it!
- queue.execute(&mut self)?;
-
- for unit in units.iter() {
- for output in self.outputs(unit)?.iter() {
- if output.flavor == FileFlavor::DebugInfo {
- continue;
- }
-
- let bindst = match output.hardlink {
- Some(ref link_dst) => link_dst,
- None => &output.path,
- };
-
- if unit.profile.test {
- self.compilation.tests.push((
- unit.pkg.clone(),
- unit.target.kind().clone(),
- unit.target.name().to_string(),
- output.path.clone(),
- ));
- } else if unit.target.is_bin() || unit.target.is_example() {
- self.compilation.binaries.push(bindst.clone());
- } else if unit.target.is_lib() {
- let pkgid = unit.pkg.package_id().clone();
- self.compilation
- .libraries
- .entry(pkgid)
- .or_insert_with(HashSet::new)
- .insert((unit.target.clone(), output.path.clone()));
- }
- }
-
- for dep in self.dep_targets(unit).iter() {
- if !unit.target.is_lib() {
- continue;
- }
-
- if dep.profile.run_custom_build {
- let out_dir = self.files().build_script_out_dir(dep).display().to_string();
- self.compilation
- .extra_env
- .entry(dep.pkg.package_id().clone())
- .or_insert_with(Vec::new)
- .push(("OUT_DIR".to_string(), out_dir));
- }
-
- if !dep.target.is_lib() {
- continue;
- }
- if dep.profile.doc {
- continue;
- }
-
- let outputs = self.outputs(dep)?;
- self.compilation
- .libraries
- .entry(unit.pkg.package_id().clone())
- .or_insert_with(HashSet::new)
- .extend(
- outputs
- .iter()
- .map(|output| (dep.target.clone(), output.path.clone())),
- );
- }
-
- let feats = self.resolve.features(unit.pkg.package_id());
- if !feats.is_empty() {
- self.compilation
- .cfgs
- .entry(unit.pkg.package_id().clone())
- .or_insert_with(|| {
- feats
- .iter()
- .map(|feat| format!("feature=\"{}\"", feat))
- .collect()
- });
- }
- let rustdocflags = self.rustdocflags_args(unit)?;
- if !rustdocflags.is_empty() {
- self.compilation
- .rustdocflags
- .entry(unit.pkg.package_id().clone())
- .or_insert(rustdocflags);
- }
-
- super::output_depinfo(&mut self, unit)?;
- }
-
- for (&(ref pkg, _), output) in self.build_state.outputs.lock().unwrap().iter() {
- self.compilation
- .cfgs
- .entry(pkg.clone())
- .or_insert_with(HashSet::new)
- .extend(output.cfgs.iter().cloned());
-
- self.compilation
- .extra_env
- .entry(pkg.clone())
- .or_insert_with(Vec::new)
- .extend(output.env.iter().cloned());
-
- for dir in output.library_paths.iter() {
- self.compilation.native_dirs.insert(dir.clone());
- }
- }
- self.compilation.target = self.target_triple().to_string();
- Ok(self.compilation)
- }
-
- pub fn prepare_units(
- &mut self,
- export_dir: Option<PathBuf>,
- units: &[Unit<'a>],
- ) -> CargoResult<()> {
- let dest = if self.build_config.release {
- "release"
- } else {
- "debug"
- };
- let host_layout = Layout::new(self.ws, None, dest)?;
- let target_layout = match self.build_config.requested_target.as_ref() {
- Some(target) => Some(Layout::new(self.ws, Some(target), dest)?),
- None => None,
- };
-
- let deps = build_unit_dependencies(units, &self)?;
- self.unit_dependencies = deps;
- let files = CompilationFiles::new(
- units,
- host_layout,
- target_layout,
- export_dir,
- self.ws,
- &self,
- );
- self.files = Some(files);
- Ok(())
- }
-
- /// Prepare this context, ensuring that all filesystem directories are in
- /// place.
- pub fn prepare(&mut self) -> CargoResult<()> {
- let _p = profile::start("preparing layout");
-
- self.files_mut()
- .host
- .prepare()
- .chain_err(|| internal("couldn't prepare build directories"))?;
- if let Some(ref mut target) = self.files.as_mut().unwrap().target {
- target
- .prepare()
- .chain_err(|| internal("couldn't prepare build directories"))?;
- }
-
- self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf();
-
- let files = self.files.as_ref().unwrap();
- let layout = files.target.as_ref().unwrap_or(&files.host);
- self.compilation.root_output = layout.dest().to_path_buf();
- self.compilation.deps_output = layout.deps().to_path_buf();
- Ok(())
- }
-
- /// Ensure that we've collected all target-specific information to compile
- /// all the units mentioned in `units`.
- fn probe_target_info(&mut self) -> CargoResult<()> {
- let _p = profile::start("Context::probe_target_info");
- debug!("probe_target_info");
- let host_target_same = match self.requested_target() {
- Some(s) if s != self.config.rustc()?.host => false,
- _ => true,
- };
-
- self.host_info = TargetInfo::new(self, Kind::Host)?;
- self.target_info = if host_target_same {
- self.host_info.clone()
- } else {
- TargetInfo::new(self, Kind::Target)?
- };
- self.compilation.host_dylib_path = self.host_info.sysroot_libdir.clone();
- self.compilation.target_dylib_path = self.target_info.sysroot_libdir.clone();
- Ok(())
- }
-
- /// Builds up the `used_in_plugin` internal to this context from the list of
- /// top-level units.
- ///
- /// This will recursively walk `units` and all of their dependencies to
- /// determine which crates are going to be used in plugins or not.
- pub fn build_used_in_plugin_map(&mut self, units: &[Unit<'a>]) -> CargoResult<()> {
- let mut visited = HashSet::new();
- for unit in units {
- self.walk_used_in_plugin_map(unit, unit.target.for_host(), &mut visited)?;
- }
- Ok(())
- }
-
- fn walk_used_in_plugin_map(
- &mut self,
- unit: &Unit<'a>,
- is_plugin: bool,
- visited: &mut HashSet<(Unit<'a>, bool)>,
- ) -> CargoResult<()> {
- if !visited.insert((*unit, is_plugin)) {
- return Ok(());
- }
- if is_plugin {
- self.used_in_plugin.insert(*unit);
- }
- for unit in self.dep_targets(unit) {
- self.walk_used_in_plugin_map(&unit, is_plugin || unit.target.for_host(), visited)?;
- }
- Ok(())
- }
-
- pub fn files(&self) -> &CompilationFiles<'a, 'cfg> {
- self.files.as_ref().unwrap()
- }
-
- fn files_mut(&mut self) -> &mut CompilationFiles<'a, 'cfg> {
- self.files.as_mut().unwrap()
- }
-
- /// Return the host triple for this context
- pub fn host_triple(&self) -> &str {
- &self.build_config.host_triple
- }
-
- /// Return the target triple which this context is targeting.
- pub fn target_triple(&self) -> &str {
- self.requested_target()
- .unwrap_or_else(|| self.host_triple())
- }
-
- /// Requested (not actual) target for the build
- pub fn requested_target(&self) -> Option<&str> {
- self.build_config.requested_target.as_ref().map(|s| &s[..])
- }
-
- /// Return the filenames that the given target for the given profile will
- /// generate, as a list of `OutputFile`s, where each entry carries:
- ///
- /// - `path`: the filename rustc compiles to (often has a metadata suffix)
- /// - `hardlink`: optional file to link/copy the result to (without the
- ///   metadata suffix)
- /// - `flavor`: the type of file (normal, linkable, debug info)
- pub fn outputs(&mut self, unit: &Unit<'a>) -> CargoResult<Arc<Vec<OutputFile>>> {
- self.files.as_ref().unwrap().outputs(unit, self)
- }
-
- /// For a package, return all targets which are registered as dependencies
- /// for that package.
- // TODO: this ideally should be `-> &[Unit<'a>]`
- pub fn dep_targets(&self, unit: &Unit<'a>) -> Vec<Unit<'a>> {
- // If this build script's execution has been overridden then we don't
- // actually depend on anything, we've reached the end of the dependency
- // chain as we've got all the info we're gonna get.
- //
- // Note there's a subtlety about this piece of code! The
- // `build_script_overridden` map here is populated in
- // `custom_build::build_map` which you need to call before inspecting
- // dependencies. However, that code itself calls this method and
- // gets a full pre-filtered set of dependencies. This is not super
- // obvious or clear, but it does work at the moment.
- if unit.profile.run_custom_build {
- let key = (unit.pkg.package_id().clone(), unit.kind);
- if self.build_script_overridden.contains(&key) {
- return Vec::new();
- }
- }
- self.unit_dependencies[unit].clone()
- }
-
- fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
- // If this dependency is only available for certain platforms,
- // make sure we're only enabling it for that platform.
- let platform = match dep.platform() {
- Some(p) => p,
- None => return true,
- };
- let (name, info) = match kind {
- Kind::Host => (self.host_triple(), &self.host_info),
- Kind::Target => (self.target_triple(), &self.target_info),
- };
- platform.matches(name, info.cfg())
- }
-
- /// Gets a package for the given package id.
- pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
- self.packages.get(id)
- }
-
- /// Get the user-specified linker for a particular host or target
- pub fn linker(&self, kind: Kind) -> Option<&Path> {
- self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
- }
-
- /// Get the user-specified `ar` program for a particular host or target
- pub fn ar(&self, kind: Kind) -> Option<&Path> {
- self.target_config(kind).ar.as_ref().map(|s| s.as_ref())
- }
-
- /// Get the list of cfg printed out from the compiler for the specified kind
- pub fn cfg(&self, kind: Kind) -> &[Cfg] {
- let info = match kind {
- Kind::Host => &self.host_info,
- Kind::Target => &self.target_info,
- };
- info.cfg().unwrap_or(&[])
- }
-
- /// Get the target configuration for a particular host or target
- fn target_config(&self, kind: Kind) -> &TargetConfig {
- match kind {
- Kind::Host => &self.build_config.host,
- Kind::Target => &self.build_config.target,
- }
- }
-
- /// Number of jobs specified for this build
- pub fn jobs(&self) -> u32 {
- self.build_config.jobs
- }
-
- pub fn lib_profile(&self) -> &'a Profile {
- let (normal, test) = if self.build_config.release {
- (&self.profiles.release, &self.profiles.bench_deps)
- } else {
- (&self.profiles.dev, &self.profiles.test_deps)
- };
- if self.build_config.test {
- test
- } else {
- normal
- }
- }
-
- pub fn build_script_profile(&self, _pkg: &PackageId) -> &'a Profile {
- // TODO: should build scripts always be built with the same library
- // profile? How is this controlled at the CLI layer?
- self.lib_profile()
- }
-
- pub fn incremental_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
- // There's a number of ways to configure incremental compilation right
- // now. In order of descending priority (first is highest priority) we
- // have:
- //
- // * `CARGO_INCREMENTAL` - this is blanket used unconditionally to turn
- // on/off incremental compilation for any cargo subcommand. We'll
- // respect this if set.
- // * `build.incremental` - this blanket key in `.cargo/config` can
- // configure, system-wide, whether incremental compilation is
- // enabled. Note that setting this to `true` will not actually affect
- // all builds though. For example a `true` value doesn't enable
- // release incremental builds, only dev incremental builds. This can
- // be useful to globally disable incremental compilation like
- // `CARGO_INCREMENTAL`.
- // * `profile.dev.incremental` - in `Cargo.toml` specific profiles can
- // be configured to enable/disable incremental compilation. This is
- // primarily used to disable incremental compilation when it is buggy
- // for a project.
- // * Finally, each profile has a default for whether it will enable
- // incremental compilation or not. Primarily development profiles
- // have it enabled by default while release profiles have it disabled
- // by default.
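- //
- // In sketch form: the env var overrides everything; an explicit `false`
- // in `build.incremental` turns it off; otherwise the profile default
- // (the third element of the match below) wins.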
- let global_cfg = self.config.get_bool("build.incremental")?.map(|c| c.val);
- let incremental = match (self.incremental_env, global_cfg, unit.profile.incremental) {
- (Some(v), _, _) => v,
- (None, Some(false), _) => false,
- (None, _, other) => other,
- };
-
- if !incremental {
- return Ok(Vec::new());
- }
-
- // Only enable incremental compilation for sources the user can
- // modify (aka path sources). For things that change infrequently,
- // non-incremental builds yield better performance in the compiler
- // itself (aka crates.io / git dependencies)
- //
- // (see also https://github.com/rust-lang/cargo/issues/3972)
- if !unit.pkg.package_id().source_id().is_path() {
- return Ok(Vec::new());
- }
-
- let dir = self.files().layout(unit.kind).incremental().display();
- Ok(vec!["-C".to_string(), format!("incremental={}", dir)])
- }
-
- pub fn rustflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
- env_args(
- self.config,
- &self.build_config,
- self.info(&unit.kind).cfg(),
- unit.kind,
- "RUSTFLAGS",
- )
- }
-
- pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
- env_args(
- self.config,
- &self.build_config,
- self.info(&unit.kind).cfg(),
- unit.kind,
- "RUSTDOCFLAGS",
- )
- }
-
- pub fn show_warnings(&self, pkg: &PackageId) -> bool {
- pkg.source_id().is_path() || self.config.extra_verbose()
- }
-
- fn info(&self, kind: &Kind) -> &TargetInfo {
- match *kind {
- Kind::Host => &self.host_info,
- Kind::Target => &self.target_info,
- }
- }
-}
-
-/// Acquire extra flags to pass to the compiler from various locations.
-///
-/// The locations are:
-///
-/// - the `RUSTFLAGS` environment variable
-///
-/// then if this was not found
-///
-/// - `target.*.rustflags` from the config (`.cargo/config`)
-/// - `target.cfg(..).rustflags` from the config
-///
-/// then if neither of these were found
-///
-/// - `build.rustflags` from the config
-///
-/// Note that if a `target` is specified, no args will be passed to host code (plugins, build
-/// scripts, ...), even if it is the same as the target.
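-///
-/// For example (a sketch with a hypothetical value): if
-/// `RUSTFLAGS="-C target-cpu=native"` is set in the environment, neither
-/// `target.*.rustflags` nor `build.rustflags` is consulted at all.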
-fn env_args(
- config: &Config,
- build_config: &BuildConfig,
- target_cfg: Option<&[Cfg]>,
- kind: Kind,
- name: &str,
-) -> CargoResult<Vec<String>> {
- // We *want* to apply RUSTFLAGS only to builds for the
- // requested target architecture, and not to things like build
- // scripts and plugins, which may be for an entirely different
- // architecture. Cargo's present architecture makes it quite
- // hard to only apply flags to things that are not build
- // scripts and plugins though, so we do something more hacky
- // instead to avoid applying the same RUSTFLAGS to multiple target
- // arches:
- //
- // 1) If --target is not specified we just apply RUSTFLAGS to
- // all builds; they are all going to have the same target.
- //
- // 2) If --target *is* specified then we only apply RUSTFLAGS
- // to compilation units with the Target kind, which indicates
- // it was chosen by the --target flag.
- //
- // This means that, e.g., even if the specified --target is the
- // same as the host, build scripts and plugins won't get
- // RUSTFLAGS.
- let compiling_with_target = build_config.requested_target.is_some();
- let is_target_kind = kind == Kind::Target;
-
- if compiling_with_target && !is_target_kind {
- // This is probably a build script or plugin and we're
- // compiling with --target. In this scenario there are
- // no rustflags we can apply.
- return Ok(Vec::new());
- }
-
- // First try RUSTFLAGS from the environment
- if let Ok(a) = env::var(name) {
- let args = a.split(' ')
- .map(str::trim)
- .filter(|s| !s.is_empty())
- .map(str::to_string);
- return Ok(args.collect());
- }
-
- let mut rustflags = Vec::new();
-
- let name = name.chars()
- .flat_map(|c| c.to_lowercase())
- .collect::<String>();
- // Then the target.*.rustflags value...
- let target = build_config
- .requested_target
- .as_ref()
- .unwrap_or(&build_config.host_triple);
- let key = format!("target.{}.{}", target, name);
- if let Some(args) = config.get_list_or_split_string(&key)? {
- let args = args.val.into_iter();
- rustflags.extend(args);
- }
- // ...including target.'cfg(...)'.rustflags
- if let Some(target_cfg) = target_cfg {
- if let Some(table) = config.get_table("target")? {
- let cfgs = table.val.keys().filter_map(|t| {
- if t.starts_with("cfg(") && t.ends_with(')') {
- let cfg = &t[4..t.len() - 1];
- CfgExpr::from_str(cfg).ok().and_then(|c| {
- if c.matches(target_cfg) {
- Some(t)
- } else {
- None
- }
- })
- } else {
- None
- }
- });
-
- // Note that we may have multiple matching `[target]` sections and
- // because we're passing flags to the compiler this can affect
- // cargo's caching and whether it rebuilds. Ensure a deterministic
- // ordering through sorting for now. We may one day wish to
- // ensure a deterministic ordering via the order in which keys were
- // defined in files instead.
- let mut cfgs = cfgs.collect::<Vec<_>>();
- cfgs.sort();
-
- for n in cfgs {
- let key = format!("target.{}.{}", n, name);
- if let Some(args) = config.get_list_or_split_string(&key)? {
- let args = args.val.into_iter();
- rustflags.extend(args);
- }
- }
- }
- }
-
- if !rustflags.is_empty() {
- return Ok(rustflags);
- }
-
- // Then the build.rustflags value
- let key = format!("build.{}", name);
- if let Some(args) = config.get_list_or_split_string(&key)? {
- let args = args.val.into_iter();
- return Ok(args.collect());
- }
-
- Ok(Vec::new())
-}
+++ /dev/null
-use std::cell::RefCell;
-use std::collections::hash_map::{Entry, HashMap};
-use std::path::PathBuf;
-use std::str::{self, FromStr};
-
-use super::{env_args, Context};
-use util::{CargoResult, CargoResultExt, Cfg, ProcessBuilder};
-use core::TargetKind;
-use super::Kind;
-
-#[derive(Clone, Default)]
-pub struct TargetInfo {
- crate_type_process: Option<ProcessBuilder>,
- crate_types: RefCell<HashMap<String, Option<(String, String)>>>,
- cfg: Option<Vec<Cfg>>,
- pub sysroot_libdir: Option<PathBuf>,
-}
-
-/// Type of each file generated by a Unit.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-pub enum FileFlavor {
- /// Not a special file type.
- Normal,
- /// It is something you can link against (e.g. a library)
- Linkable,
- /// It is a piece of external debug information (e.g. *.dSYM and *.pdb)
- DebugInfo,
-}
-
-pub struct FileType {
- pub flavor: FileFlavor,
- suffix: String,
- prefix: String,
- // A wasm bin target will generate two files in `deps`, such as
- // "web-stuff.js" and "web_stuff.wasm". Note the different usage of
- // "-" and "_". `should_replace_hyphens` indicates that we need to
- // convert the stem "web-stuff" to "web_stuff", so we won't miss
- // "web_stuff.wasm".
- should_replace_hyphens: bool,
-}
-
-impl FileType {
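- /// Renders the full filename for a stem. E.g. (a sketch): with prefix
- /// `"lib"`, suffix `".rlib"`, and stem `"foo"` this yields `libfoo.rlib`;
- /// with `should_replace_hyphens` set, a stem like `"web-stuff"` becomes
- /// `"web_stuff"` first.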
- pub fn filename(&self, stem: &str) -> String {
- let stem = if self.should_replace_hyphens {
- stem.replace("-", "_")
- } else {
- stem.to_string()
- };
- format!("{}{}{}", self.prefix, stem, self.suffix)
- }
-}
-
-impl TargetInfo {
- pub fn new(cx: &Context, kind: Kind) -> CargoResult<TargetInfo> {
- let rustflags = env_args(cx.config, &cx.build_config, None, kind, "RUSTFLAGS")?;
- let mut process = cx.config.rustc()?.process();
- process
- .arg("-")
- .arg("--crate-name")
- .arg("___")
- .arg("--print=file-names")
- .args(&rustflags)
- .env_remove("RUST_LOG");
-
- if kind == Kind::Target {
- process.arg("--target").arg(&cx.target_triple());
- }
-
- let crate_type_process = process.clone();
- const KNOWN_CRATE_TYPES: &[&str] =
- &["bin", "rlib", "dylib", "cdylib", "staticlib", "proc-macro"];
- for crate_type in KNOWN_CRATE_TYPES.iter() {
- process.arg("--crate-type").arg(crate_type);
- }
-
- let mut with_cfg = process.clone();
- with_cfg.arg("--print=sysroot");
- with_cfg.arg("--print=cfg");
-
- let mut has_cfg_and_sysroot = true;
- let output = with_cfg
- .exec_with_output()
- .or_else(|_| {
- has_cfg_and_sysroot = false;
- process.exec_with_output()
- })
- .chain_err(|| "failed to run `rustc` to learn about target-specific information")?;
-
- let error = str::from_utf8(&output.stderr).unwrap();
- let output = str::from_utf8(&output.stdout).unwrap();
- let mut lines = output.lines();
- let mut map = HashMap::new();
- for crate_type in KNOWN_CRATE_TYPES {
- let out = parse_crate_type(crate_type, error, &mut lines)?;
- map.insert(crate_type.to_string(), out);
- }
-
- let mut sysroot_libdir = None;
- if has_cfg_and_sysroot {
- let line = match lines.next() {
- Some(line) => line,
- None => bail!(
- "output of --print=sysroot missing when learning about \
- target-specific information from rustc"
- ),
- };
- let mut rustlib = PathBuf::from(line);
- if kind == Kind::Host {
- if cfg!(windows) {
- rustlib.push("bin");
- } else {
- rustlib.push("lib");
- }
- sysroot_libdir = Some(rustlib);
- } else {
- rustlib.push("lib");
- rustlib.push("rustlib");
- rustlib.push(cx.target_triple());
- rustlib.push("lib");
- sysroot_libdir = Some(rustlib);
- }
- }
-
- let cfg = if has_cfg_and_sysroot {
- Some(lines.map(Cfg::from_str).collect::<CargoResult<_>>()?)
- } else {
- None
- };
-
- Ok(TargetInfo {
- crate_type_process: Some(crate_type_process),
- crate_types: RefCell::new(map),
- cfg,
- sysroot_libdir,
- })
- }
-
- pub fn cfg(&self) -> Option<&[Cfg]> {
- self.cfg.as_ref().map(|v| v.as_ref())
- }
-
- pub fn file_types(
- &self,
- crate_type: &str,
- flavor: FileFlavor,
- kind: &TargetKind,
- target_triple: &str,
- ) -> CargoResult<Option<Vec<FileType>>> {
- let mut crate_types = self.crate_types.borrow_mut();
- let entry = crate_types.entry(crate_type.to_string());
- let crate_type_info = match entry {
- Entry::Occupied(o) => &*o.into_mut(),
- Entry::Vacant(v) => {
- let value = self.discover_crate_type(v.key())?;
- &*v.insert(value)
- }
- };
- let (prefix, suffix) = match *crate_type_info {
- Some((ref prefix, ref suffix)) => (prefix, suffix),
- None => return Ok(None),
- };
- let mut ret = vec![
- FileType {
- suffix: suffix.clone(),
- prefix: prefix.clone(),
- flavor,
- should_replace_hyphens: false,
- },
- ];
-
- // rust-lang/cargo#4500
- if target_triple.ends_with("pc-windows-msvc") && crate_type.ends_with("dylib")
- && suffix == ".dll"
- {
- ret.push(FileType {
- suffix: ".dll.lib".to_string(),
- prefix: prefix.clone(),
- flavor: FileFlavor::Normal,
- should_replace_hyphens: false,
- })
- }
-
- // rust-lang/cargo#4535
- if target_triple.starts_with("wasm32-") && crate_type == "bin" && suffix == ".js" {
- ret.push(FileType {
- suffix: ".wasm".to_string(),
- prefix: prefix.clone(),
- flavor: FileFlavor::Normal,
- should_replace_hyphens: true,
- })
- }
-
- // rust-lang/cargo#4490, rust-lang/cargo#4960
- // - only uplift debuginfo for binaries.
- // tests are run directly from target/debug/deps/
- // and examples are inside target/debug/examples/ which already have symbols next to them
- // so no need to do anything.
- if *kind == TargetKind::Bin {
- if target_triple.contains("-apple-") {
- ret.push(FileType {
- suffix: ".dSYM".to_string(),
- prefix: prefix.clone(),
- flavor: FileFlavor::DebugInfo,
- should_replace_hyphens: false,
- })
- } else if target_triple.ends_with("-msvc") {
- ret.push(FileType {
- suffix: ".pdb".to_string(),
- prefix: prefix.clone(),
- flavor: FileFlavor::DebugInfo,
- should_replace_hyphens: false,
- })
- }
- }
-
- Ok(Some(ret))
- }
-
- fn discover_crate_type(&self, crate_type: &str) -> CargoResult<Option<(String, String)>> {
- let mut process = self.crate_type_process.clone().unwrap();
-
- process.arg("--crate-type").arg(crate_type);
-
- let output = process.exec_with_output().chain_err(|| {
- format!(
- "failed to run `rustc` to learn about \
- crate-type {} information",
- crate_type
- )
- })?;
-
- let error = str::from_utf8(&output.stderr).unwrap();
- let output = str::from_utf8(&output.stdout).unwrap();
- Ok(parse_crate_type(crate_type, error, &mut output.lines())?)
- }
-}
-
-/// Takes rustc output (from specialized command-line args) and calculates the file prefix and
-/// suffix for the given crate type, or returns `None` if the type is not supported (e.g. for a
-/// Rust library like `libcargo.rlib`, prefix = "lib" and suffix = ".rlib").
-///
-/// The caller needs to ensure that the lines object is at the correct line for the given crate
-/// type: this is not checked.
-// This function cannot handle more than 1 file per type (with wasm32-unknown-emscripten, there
-// are 2 files for bin (.wasm and .js)).
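-//
-// For example (a sketch): given the `--print=file-names` line `lib___.rlib`
-// for crate type `rlib`, splitting on `"___"` yields
-// `Some(("lib".to_string(), ".rlib".to_string()))`.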
-fn parse_crate_type(
- crate_type: &str,
- error: &str,
- lines: &mut str::Lines,
-) -> CargoResult<Option<(String, String)>> {
- let not_supported = error.lines().any(|line| {
- (line.contains("unsupported crate type") || line.contains("unknown crate type"))
- && line.contains(crate_type)
- });
- if not_supported {
- return Ok(None);
- }
- let line = match lines.next() {
- Some(line) => line,
- None => bail!(
- "malformed output when learning about \
- crate-type {} information",
- crate_type
- ),
- };
- let mut parts = line.trim().split("___");
- let prefix = parts.next().unwrap();
- let suffix = match parts.next() {
- Some(part) => part,
- None => bail!(
- "output of --print=file-names has changed in \
- the compiler, cannot parse"
- ),
- };
-
- Ok(Some((prefix.to_string(), suffix.to_string())))
-}
+++ /dev/null
-//! Constructs the dependency graph for compilation.
-//!
-//! Rust code is typically organized as a set of Cargo packages. The
-//! dependencies between the packages themselves are stored in the
-//! `Resolve` struct. However, we can't use that information as is for
-//! compilation! A package typically contains several targets, or crates,
-//! and these targets have inter-dependencies. For example, you need to
-//! compile the `lib` target before the `bin` one, and you need to compile
-//! `build.rs` before either of those.
-//!
-//! So, we need to lower the `Resolve`, which specifies dependencies between
-//! *packages*, to a graph of dependencies between their *targets*, and this
-//! is exactly what this module is doing! Well, almost exactly: another
-//! complication is that we might want to compile the same target several times
-//! (for example, with and without tests), so we actually build a dependency
-//! graph of `Unit`s, which capture these properties.
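-//!
-//! For example (a sketch): a package with a `build.rs`, a `lib`, and a
-//! `bin` lowers to a unit graph roughly like
-//! `bin -> lib -> run-build-script -> compile-build-script`, with the
-//! build-script units compiled for the host.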
-
-use super::{Context, Kind, Unit};
-use std::collections::HashMap;
-use CargoResult;
-use core::dependency::Kind as DepKind;
-use core::Target;
-use core::Profile;
-
-pub fn build_unit_dependencies<'a, 'cfg>(
- roots: &[Unit<'a>],
- cx: &Context<'a, 'cfg>,
-) -> CargoResult<HashMap<Unit<'a>, Vec<Unit<'a>>>> {
- let mut deps = HashMap::new();
- for unit in roots.iter() {
- deps_of(unit, cx, &mut deps)?;
- }
-
- Ok(deps)
-}
-
-fn deps_of<'a, 'b, 'cfg>(
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- deps: &'b mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
-) -> CargoResult<&'b [Unit<'a>]> {
- if !deps.contains_key(unit) {
- let unit_deps = compute_deps(unit, cx, deps)?;
- deps.insert(*unit, unit_deps.clone());
- for unit in unit_deps {
- deps_of(&unit, cx, deps)?;
- }
- }
- Ok(deps[unit].as_ref())
-}
-
-/// For a package, return all targets which are registered as dependencies
-/// for that package.
-fn compute_deps<'a, 'b, 'cfg>(
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- deps: &'b mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
-) -> CargoResult<Vec<Unit<'a>>> {
- if unit.profile.run_custom_build {
- return compute_deps_custom_build(unit, cx, deps);
- } else if unit.profile.doc && !unit.profile.test {
- return compute_deps_doc(unit, cx);
- }
-
- let id = unit.pkg.package_id();
- let deps = cx.resolve.deps(id);
- let mut ret = deps.filter(|dep| {
- unit.pkg
- .dependencies()
- .iter()
- .filter(|d| d.name() == dep.name() && d.version_req().matches(dep.version()))
- .any(|d| {
- // If this target is a build command, then we only want build
- // dependencies, otherwise we want everything *other than* build
- // dependencies.
- if unit.target.is_custom_build() != d.is_build() {
- return false;
- }
-
- // If this dependency is *not* a transitive dependency, then it
- // only applies to test/example targets
- if !d.is_transitive() && !unit.target.is_test() && !unit.target.is_example()
- && !unit.profile.test
- {
- return false;
- }
-
- // If this dependency is only available for certain platforms,
- // make sure we're only enabling it for that platform.
- if !cx.dep_platform_activated(d, unit.kind) {
- return false;
- }
-
- // If the dependency is optional, then we're only activating it
- // if the corresponding feature was activated
- if d.is_optional() && !cx.resolve.features(id).contains(&*d.name()) {
- return false;
- }
-
- // If we've gotten past all that, then this dependency is
- // actually used!
- true
- })
- }).filter_map(|id| match cx.get_package(id) {
- Ok(pkg) => pkg.targets().iter().find(|t| t.is_lib()).map(|t| {
- let unit = Unit {
- pkg,
- target: t,
- profile: lib_or_check_profile(unit, t, cx),
- kind: unit.kind.for_target(t),
- };
- Ok(unit)
- }),
- Err(e) => Some(Err(e)),
- })
- .collect::<CargoResult<Vec<_>>>()?;
-
- // If this target is a build script, then what we've collected so far is
- // all we need. If this isn't a build script, then it depends on the
- // build script if there is one.
- if unit.target.is_custom_build() {
- return Ok(ret);
- }
- ret.extend(dep_build_script(unit, cx));
-
- // If this target is a binary, test, example, etc, then it depends on
- // the library of the same package. The call to `resolve.deps` above
- // didn't include `pkg` in the return values, so we need to special case
- // it here and see if we need to push `(pkg, pkg_lib_target)`.
- if unit.target.is_lib() && !unit.profile.doc {
- return Ok(ret);
- }
- ret.extend(maybe_lib(unit, cx));
-
- // Integration tests/benchmarks require binaries to be built
- if unit.profile.test && (unit.target.is_test() || unit.target.is_bench()) {
- ret.extend(
- unit.pkg
- .targets()
- .iter()
- .filter(|t| {
- let no_required_features = Vec::new();
-
- t.is_bin() &&
- // Skip binaries with required features that have not been selected.
- t.required_features().unwrap_or(&no_required_features).iter().all(|f| {
- cx.resolve.features(id).contains(f)
- })
- })
- .map(|t| Unit {
- pkg: unit.pkg,
- target: t,
- profile: lib_or_check_profile(unit, t, cx),
- kind: unit.kind.for_target(t),
- }),
- );
- }
- Ok(ret)
-}
-
-/// Returns the dependencies needed to run a build script.
-///
-/// The `unit` provided must represent an execution of a build script, and
-/// the returned set of units must all be run before `unit` is run.
-fn compute_deps_custom_build<'a, 'cfg>(
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
- deps: &mut HashMap<Unit<'a>, Vec<Unit<'a>>>,
-) -> CargoResult<Vec<Unit<'a>>> {
- // When not overridden, then the dependencies to run a build script are:
- //
- // 1. Compiling the build script itself
- // 2. For each immediate dependency of our package which has a `links`
- // key, the execution of that build script.
- let not_custom_build = unit.pkg
- .targets()
- .iter()
- .find(|t| !t.is_custom_build())
- .unwrap();
- let tmp = Unit {
- target: not_custom_build,
- profile: &cx.profiles.dev,
- ..*unit
- };
- let deps = deps_of(&tmp, cx, deps)?;
- Ok(deps.iter()
- .filter_map(|unit| {
- if !unit.target.linkable() || unit.pkg.manifest().links().is_none() {
- return None;
- }
- dep_build_script(unit, cx)
- })
- .chain(Some(Unit {
- profile: cx.build_script_profile(unit.pkg.package_id()),
- kind: Kind::Host, // build scripts always compiled for the host
- ..*unit
- }))
- .collect())
-}
-
-/// Returns the dependencies necessary to document a package
-fn compute_deps_doc<'a, 'cfg>(
- unit: &Unit<'a>,
- cx: &Context<'a, 'cfg>,
-) -> CargoResult<Vec<Unit<'a>>> {
- let deps = cx.resolve
- .deps(unit.pkg.package_id())
- .filter(|dep| {
- unit.pkg
- .dependencies()
- .iter()
- .filter(|d| d.name() == dep.name())
- .any(|dep| match dep.kind() {
- DepKind::Normal => cx.dep_platform_activated(dep, unit.kind),
- _ => false,
- })
- })
- .map(|dep| cx.get_package(dep));
-
- // To document a library, we depend on dependencies actually being
- // built. If we're documenting *all* libraries, then we also depend on
- // the documentation of the library being built.
- let mut ret = Vec::new();
- for dep in deps {
- let dep = dep?;
- let lib = match dep.targets().iter().find(|t| t.is_lib()) {
- Some(lib) => lib,
- None => continue,
- };
- ret.push(Unit {
- pkg: dep,
- target: lib,
- profile: lib_or_check_profile(unit, lib, cx),
- kind: unit.kind.for_target(lib),
- });
- if cx.build_config.doc_all {
- ret.push(Unit {
- pkg: dep,
- target: lib,
- profile: &cx.profiles.doc,
- kind: unit.kind.for_target(lib),
- });
- }
- }
-
- // Be sure to build/run the build script for documented libraries as well.
- ret.extend(dep_build_script(unit, cx));
-
- // If we document a binary, we need the library available
- if unit.target.is_bin() {
- ret.extend(maybe_lib(unit, cx));
- }
- Ok(ret)
-}
-
-fn maybe_lib<'a, 'cfg>(unit: &Unit<'a>, cx: &Context<'a, 'cfg>) -> Option<Unit<'a>> {
- unit.pkg
- .targets()
- .iter()
- .find(|t| t.linkable())
- .map(|t| Unit {
- pkg: unit.pkg,
- target: t,
- profile: lib_or_check_profile(unit, t, cx),
- kind: unit.kind.for_target(t),
- })
-}
-
-/// If a build script is scheduled to be run for the package specified by
-/// `unit`, this function will return the unit to run that build script.
-///
-/// Overriding a build script simply means that the running of the build
-/// script itself doesn't have any dependencies, so even in that case a unit
-/// of work is still returned. `None` is only returned if the package has no
-/// build script.
-fn dep_build_script<'a, 'cfg>(unit: &Unit<'a>, cx: &Context<'a, 'cfg>) -> Option<Unit<'a>> {
- unit.pkg
- .targets()
- .iter()
- .find(|t| t.is_custom_build())
- .map(|t| Unit {
- pkg: unit.pkg,
- target: t,
- profile: &cx.profiles.custom_build,
- kind: unit.kind,
- })
-}
-
-fn lib_or_check_profile<'a, 'cfg>(
- unit: &Unit,
- target: &Target,
- cx: &Context<'a, 'cfg>,
-) -> &'a Profile {
- if !target.is_custom_build() && !target.for_host()
- && (unit.profile.check || (unit.profile.doc && !unit.profile.test))
- {
- return &cx.profiles.check;
- }
- cx.lib_profile()
-}
+++ /dev/null
-use std::collections::{BTreeSet, HashSet};
-use std::collections::hash_map::{Entry, HashMap};
-use std::fs;
-use std::path::{Path, PathBuf};
-use std::str;
-use std::sync::{Arc, Mutex};
-
-use core::PackageId;
-use util::{Cfg, Freshness};
-use util::errors::{CargoResult, CargoResultExt};
-use util::{self, internal, paths, profile};
-use util::machine_message;
-
-use super::job::Work;
-use super::{fingerprint, Context, Kind, Unit};
-
-/// Contains the parsed output of a custom build script.
-#[derive(Clone, Debug, Hash)]
-pub struct BuildOutput {
- /// Paths to pass to rustc with the `-L` flag
- pub library_paths: Vec<PathBuf>,
- /// Names and link kinds of libraries, suitable for the `-l` flag
- pub library_links: Vec<String>,
- /// Various `--cfg` flags to pass to the compiler
- pub cfgs: Vec<String>,
- /// Additional environment variables to run the compiler with.
- pub env: Vec<(String, String)>,
- /// Metadata to pass to the immediate dependencies
- pub metadata: Vec<(String, String)>,
- /// Paths to trigger a rerun of this build script.
- pub rerun_if_changed: Vec<PathBuf>,
- /// Environment variables which, when changed, will cause a rebuild.
- pub rerun_if_env_changed: Vec<String>,
- /// Warnings generated by this build.
- pub warnings: Vec<String>,
-}
-
-/// Map of packages to build info
-pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
-
-/// Build info and overrides
-pub struct BuildState {
- pub outputs: Mutex<BuildMap>,
- overrides: HashMap<(String, Kind), BuildOutput>,
-}
-
-#[derive(Default)]
-pub struct BuildScripts {
- // Cargo will use this `to_link` vector to add -L flags to compiles as we
- // propagate them upwards towards the final build. Note, however, that
- // the ordering of `to_link` must remain topologically sorted.
- // This will ensure that build scripts which print their paths properly
- // will correctly pick up the files they generated (if there are
- // duplicates elsewhere).
- //
- // To preserve this ordering, the (id, kind) is stored in two places, once
- // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
- // this as we're building interactively below to ensure that the memory
- // usage here doesn't blow up too much.
- //
- // For more information, see #2354
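- //
- // A sketch of the order-preserving dedupe pattern this enables
- // (hypothetical helper, not the actual call site):
- //
- // fn add_to_link(&mut self, id: PackageId, kind: Kind) {
- //     if self.seen_to_link.insert((id.clone(), kind)) {
- //         self.to_link.push((id, kind));
- //     }
- // }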
- pub to_link: Vec<(PackageId, Kind)>,
- seen_to_link: HashSet<(PackageId, Kind)>,
- pub plugins: BTreeSet<PackageId>,
-}
-
-pub struct BuildDeps {
- pub build_script_output: PathBuf,
- pub rerun_if_changed: Vec<PathBuf>,
- pub rerun_if_env_changed: Vec<String>,
-}
-
-/// Prepares a `Work` that executes the target as a custom build script.
-///
-/// The `req` given is the requirement which this run of the build script will
-/// prepare work for. If the requirement is specified as both the target and the
-/// host platforms it is assumed that the two are equal and the build script is
-/// only run once (not twice).
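-///
-/// A hedged reading of the returned tuple `(dirty, fresh, freshness)`:
-/// the dirty `Work` actually runs the script, while the fresh `Work` is
-/// used when the fingerprint reports nothing has changed.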
-pub fn prepare<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> CargoResult<(Work, Work, Freshness)> {
- let _p = profile::start(format!(
- "build script prepare: {}/{}",
- unit.pkg,
- unit.target.name()
- ));
-
- let key = (unit.pkg.package_id().clone(), unit.kind);
- let overridden = cx.build_script_overridden.contains(&key);
- let (work_dirty, work_fresh) = if overridden {
- (Work::noop(), Work::noop())
- } else {
- build_work(cx, unit)?
- };
-
- // Now that we've prep'd our work, build the work needed to manage the
- // fingerprint and then start returning that upwards.
- let (freshness, dirty, fresh) = fingerprint::prepare_build_cmd(cx, unit)?;
-
- Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
-}
-
-fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> {
- assert!(unit.profile.run_custom_build);
- let dependencies = cx.dep_targets(unit);
- let build_script_unit = dependencies
- .iter()
- .find(|d| !d.profile.run_custom_build && d.target.is_custom_build())
- .expect("running a script not depending on an actual script");
- let script_output = cx.files().build_script_dir(build_script_unit);
- let build_output = cx.files().build_script_out_dir(unit);
-
- // Building the command to execute
- let to_exec = script_output.join(unit.target.name());
-
- // Start preparing the process to execute, starting out with some
- // environment variables. Note that the profile-related environment
- // variables are not set from the build script's profile but rather from
- // the package's library profile.
- let profile = cx.lib_profile();
- let to_exec = to_exec.into_os_string();
- let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?;
- cmd.env("OUT_DIR", &build_output)
- .env("CARGO_MANIFEST_DIR", unit.pkg.root())
- .env("NUM_JOBS", &cx.jobs().to_string())
- .env(
- "TARGET",
- &match unit.kind {
- Kind::Host => cx.host_triple(),
- Kind::Target => cx.target_triple(),
- },
- )
- .env("DEBUG", &profile.debuginfo.is_some().to_string())
- .env("OPT_LEVEL", &profile.opt_level)
- .env(
- "PROFILE",
- if cx.build_config.release {
- "release"
- } else {
- "debug"
- },
- )
- .env("HOST", cx.host_triple())
- .env("RUSTC", &cx.config.rustc()?.path)
- .env("RUSTDOC", &*cx.config.rustdoc()?)
- .inherit_jobserver(&cx.jobserver);
-
- if let Some(links) = unit.pkg.manifest().links() {
- cmd.env("CARGO_MANIFEST_LINKS", links);
- }
-
- // Be sure to pass along all enabled features for this package, this is the
- // last piece of statically known information that we have.
- for feat in cx.resolve.features(unit.pkg.package_id()).iter() {
- cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
- }
-
- let mut cfg_map = HashMap::new();
- for cfg in cx.cfg(unit.kind) {
- match *cfg {
- Cfg::Name(ref n) => {
- cfg_map.insert(n.clone(), None);
- }
- Cfg::KeyPair(ref k, ref v) => {
- if let Some(ref mut values) =
- *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new()))
- {
- values.push(v.clone())
- }
- }
- }
- }
- for (k, v) in cfg_map {
- let k = format!("CARGO_CFG_{}", super::envify(&k));
- match v {
- Some(list) => {
- cmd.env(&k, list.join(","));
- }
- None => {
- cmd.env(&k, "");
- }
- }
- }
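- // As an illustration, when targeting Linux the script typically sees
- // `CARGO_CFG_UNIX=""` (a bare cfg name) and `CARGO_CFG_TARGET_OS=linux`;
- // multi-valued keys such as `target_feature` arrive comma-joined, e.g.
- // `CARGO_CFG_TARGET_FEATURE=fxsr,sse,sse2`.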
-
- // Gather the set of native dependencies that this package has along with
- // some other variables to close over.
- //
- // This information will be used at build-time later on to figure out which
- // sorts of variables need to be discovered at that time.
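- //
- // Note: a dependency appears in `lib_deps` below only when its own build
- // script runs, and such packages are expected to declare a `links` key,
- // which is why the `unwrap` on `links()` below is assumed not to panic.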
- let lib_deps = {
- dependencies
- .iter()
- .filter_map(|unit| {
- if unit.profile.run_custom_build {
- Some((
- unit.pkg.manifest().links().unwrap().to_string(),
- unit.pkg.package_id().clone(),
- ))
- } else {
- None
- }
- })
- .collect::<Vec<_>>()
- };
- let pkg_name = unit.pkg.to_string();
- let build_state = Arc::clone(&cx.build_state);
- let id = unit.pkg.package_id().clone();
- let (output_file, err_file, root_output_file) = {
- let build_output_parent = build_output.parent().unwrap();
- let output_file = build_output_parent.join("output");
- let err_file = build_output_parent.join("stderr");
- let root_output_file = build_output_parent.join("root-output");
- (output_file, err_file, root_output_file)
- };
- let root_output = cx.files().target_root().to_path_buf();
- let all = (
- id.clone(),
- pkg_name.clone(),
- Arc::clone(&build_state),
- output_file.clone(),
- root_output.clone(),
- );
- let build_scripts = super::load_build_deps(cx, unit);
- let kind = unit.kind;
- let json_messages = cx.build_config.json_messages;
-
- // Check to see if the build script has already run, and if it has, keep
- // track of whether it told us about any explicit dependencies.
- let prev_root_output = paths::read_bytes(&root_output_file)
- .and_then(|bytes| util::bytes2path(&bytes))
- .unwrap_or_else(|_| cmd.get_cwd().unwrap().to_path_buf());
- let prev_output =
- BuildOutput::parse_file(&output_file, &pkg_name, &prev_root_output, &root_output).ok();
- let deps = BuildDeps::new(&output_file, prev_output.as_ref());
- cx.build_explicit_deps.insert(*unit, deps);
-
- fs::create_dir_all(&script_output)?;
- fs::create_dir_all(&build_output)?;
-
- // Prepare the unit of "dirty work" which will actually run the custom build
- // command.
- //
- // Note that this has to do some extra work just before running the command
- // to determine extra environment variables and such.
- let dirty = Work::new(move |state| {
- // Make sure that OUT_DIR exists.
- //
- // If an output directory already exists from a previous run it is
- // reused as-is, otherwise it's created here.
- if fs::metadata(&build_output).is_err() {
- fs::create_dir(&build_output).chain_err(|| {
- internal(
- "failed to create script output directory for \
- build command",
- )
- })?;
- }
-
- // For all our native lib dependencies, pick up their metadata to pass
- // along to this custom build command. We're also careful to augment our
- // dynamic library search path in case the build script depended on any
- // native dynamic libraries.
- {
- let build_state = build_state.outputs.lock().unwrap();
- for (name, id) in lib_deps {
- let key = (id.clone(), kind);
- let state = build_state.get(&key).ok_or_else(|| {
- internal(format!(
- "failed to locate build state for env \
- vars: {}/{:?}",
- id, kind
- ))
- })?;
- let data = &state.metadata;
- for &(ref key, ref value) in data.iter() {
- cmd.env(
- &format!("DEP_{}_{}", super::envify(&name), super::envify(key)),
- value,
- );
- }
- }
- if let Some(build_scripts) = build_scripts {
- super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &root_output)?;
- }
- }
-
- // And now finally, run the build command itself!
- state.running(&cmd);
- let output = cmd.exec_with_streaming(
- &mut |out_line| {
- state.stdout(out_line);
- Ok(())
- },
- &mut |err_line| {
- state.stderr(err_line);
- Ok(())
- },
- true,
- ).map_err(|e| {
- format_err!(
- "failed to run custom build command for `{}`\n{}",
- pkg_name,
- e
- )
- })?;
-
- // After the build command has finished running, we need to be sure to
- // remember all of its output so we can later discover precisely what it
- // was, even if we don't run the build command again (due to freshness).
- //
- // This is also the location where we provide feedback into the build
- // state informing what variables were discovered via our script as
- // well.
- paths::write(&output_file, &output.stdout)?;
- paths::write(&err_file, &output.stderr)?;
- paths::write(&root_output_file, util::path2bytes(&root_output)?)?;
- let parsed_output =
- BuildOutput::parse(&output.stdout, &pkg_name, &root_output, &root_output)?;
-
- if json_messages {
- let library_paths = parsed_output
- .library_paths
- .iter()
- .map(|l| l.display().to_string())
- .collect::<Vec<_>>();
- machine_message::emit(&machine_message::BuildScript {
- package_id: &id,
- linked_libs: &parsed_output.library_links,
- linked_paths: &library_paths,
- cfgs: &parsed_output.cfgs,
- env: &parsed_output.env,
- });
- }
-
- build_state.insert(id, kind, parsed_output);
- Ok(())
- });
-
- // Now that we've prepared our work-to-do, we need to prepare the fresh work
- // itself to run when we actually end up just discarding what we calculated
- // above.
- let fresh = Work::new(move |_tx| {
- let (id, pkg_name, build_state, output_file, root_output) = all;
- let output = match prev_output {
- Some(output) => output,
- None => {
- BuildOutput::parse_file(&output_file, &pkg_name, &prev_root_output, &root_output)?
- }
- };
- build_state.insert(id, kind, output);
- Ok(())
- });
-
- Ok((dirty, fresh))
-}
-
-impl BuildState {
- pub fn new(config: &super::BuildConfig) -> BuildState {
- let mut overrides = HashMap::new();
- let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
- let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
- for ((name, output), kind) in i1.chain(i2) {
- overrides.insert((name.clone(), kind), output.clone());
- }
- BuildState {
- outputs: Mutex::new(HashMap::new()),
- overrides,
- }
- }
-
- fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
- self.outputs.lock().unwrap().insert((id, kind), output);
- }
-}
-
-impl BuildOutput {
- pub fn parse_file(
- path: &Path,
- pkg_name: &str,
- root_output_when_generated: &Path,
- root_output: &Path,
- ) -> CargoResult<BuildOutput> {
- let contents = paths::read_bytes(path)?;
- BuildOutput::parse(&contents, pkg_name, root_output_when_generated, root_output)
- }
-
- // Parses the output of a script.
- // The `pkg_name` is used for error messages.
- pub fn parse(
- input: &[u8],
- pkg_name: &str,
- root_output_when_generated: &Path,
- root_output: &Path,
- ) -> CargoResult<BuildOutput> {
- let mut library_paths = Vec::new();
- let mut library_links = Vec::new();
- let mut cfgs = Vec::new();
- let mut env = Vec::new();
- let mut metadata = Vec::new();
- let mut rerun_if_changed = Vec::new();
- let mut rerun_if_env_changed = Vec::new();
- let mut warnings = Vec::new();
- let whence = format!("build script of `{}`", pkg_name);
-
- for line in input.split(|b| *b == b'\n') {
- let line = match str::from_utf8(line) {
- Ok(line) => line.trim(),
- Err(..) => continue,
- };
- let mut iter = line.splitn(2, ':');
- if iter.next() != Some("cargo") {
- // skip this line since it doesn't start with "cargo:"
- continue;
- }
- let data = match iter.next() {
- Some(val) => val,
- None => continue,
- };
-
- // getting the `key=value` part of the line
- let mut iter = data.splitn(2, '=');
- let key = iter.next();
- let value = iter.next();
- let (key, value) = match (key, value) {
- (Some(a), Some(b)) => (a, b.trim_right()),
- // line started with `cargo:` but didn't match `key=value`
- _ => bail!("Wrong output in {}: `{}`", whence, line),
- };
-
- let path = |val: &str| match Path::new(val).strip_prefix(root_output_when_generated) {
- Ok(path) => root_output.join(path),
- Err(_) => PathBuf::from(val),
- };
-
- match key {
- "rustc-flags" => {
- let (paths, links) = BuildOutput::parse_rustc_flags(value, &whence)?;
- library_links.extend(links.into_iter());
- library_paths.extend(paths.into_iter());
- }
- "rustc-link-lib" => library_links.push(value.to_string()),
- "rustc-link-search" => library_paths.push(path(value)),
- "rustc-cfg" => cfgs.push(value.to_string()),
- "rustc-env" => env.push(BuildOutput::parse_rustc_env(value, &whence)?),
- "warning" => warnings.push(value.to_string()),
- "rerun-if-changed" => rerun_if_changed.push(path(value)),
- "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()),
- _ => metadata.push((key.to_string(), value.to_string())),
- }
- }
-
- Ok(BuildOutput {
- library_paths,
- library_links,
- cfgs,
- env,
- metadata,
- rerun_if_changed,
- rerun_if_env_changed,
- warnings,
- })
- }
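-
- // A minimal sketch of the line syntax accepted above (paths and names are
- // invented for illustration):
- //
- // ```ignore
- // cargo:rustc-link-search=/opt/widget/lib   // -> library_paths
- // cargo:rustc-link-lib=widget               // -> library_links
- // cargo:rustc-cfg=has_widget                // -> cfgs
- // cargo:rustc-env=WIDGET_VERSION=1.2        // -> env
- // cargo:rerun-if-changed=build.rs           // -> rerun_if_changed
- // cargo:root=/opt/widget                    // -> metadata ("root", ...)
- // ```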
-
- pub fn parse_rustc_flags(
- value: &str,
- whence: &str,
- ) -> CargoResult<(Vec<PathBuf>, Vec<String>)> {
- let value = value.trim();
- let mut flags_iter = value
- .split(|c: char| c.is_whitespace())
- .filter(|w| w.chars().any(|c| !c.is_whitespace()));
- let (mut library_paths, mut library_links) = (Vec::new(), Vec::new());
- while let Some(flag) = flags_iter.next() {
- if flag != "-l" && flag != "-L" {
- bail!(
- "Only `-l` and `-L` flags are allowed in {}: `{}`",
- whence,
- value
- )
- }
- let value = match flags_iter.next() {
- Some(v) => v,
- None => bail!(
- "Flag in rustc-flags has no value in {}: `{}`",
- whence,
- value
- ),
- };
- match flag {
- "-l" => library_links.push(value.to_string()),
- "-L" => library_paths.push(PathBuf::from(value)),
-
- // was already checked above
- _ => bail!("only -l and -L flags are allowed"),
- };
- }
- Ok((library_paths, library_links))
- }
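-
- // Sketch (invented values): `parse_rustc_flags("-L /opt/widget/lib -l widget",
- // &whence)` yields `(vec![PathBuf::from("/opt/widget/lib")],
- // vec!["widget".to_string()])`.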
-
- pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> {
- let mut iter = value.splitn(2, '=');
- let name = iter.next();
- let val = iter.next();
- match (name, val) {
- (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())),
- _ => bail!("Variable rustc-env has no value in {}: {}", whence, value),
- }
- }
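-
- // Sketch: `parse_rustc_env("FOO=bar", &whence)` yields `("FOO", "bar")`;
- // input without an `=` is rejected with the error above.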
-}
-
-impl BuildDeps {
- pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps {
- BuildDeps {
- build_script_output: output_file.to_path_buf(),
- rerun_if_changed: output
- .map(|p| &p.rerun_if_changed)
- .cloned()
- .unwrap_or_default(),
- rerun_if_env_changed: output
- .map(|p| &p.rerun_if_env_changed)
- .cloned()
- .unwrap_or_default(),
- }
- }
-}
-
-/// Compute the `build_scripts` map in the `Context` which tracks what build
-/// scripts each package depends on.
-///
-/// The global `build_scripts` map lists for all (package, kind) tuples what set
-/// of packages' build script outputs must be considered. For example this lists
-/// all dependencies' `-L` flags which need to be propagated transitively.
-///
-/// The given set of targets to this function is the initial set of
-/// targets/profiles which are being built.
-pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> {
- let mut ret = HashMap::new();
- for unit in units {
- build(&mut ret, cx, unit)?;
- }
- cx.build_scripts
- .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v))));
- return Ok(());
-
- // Recursive function to build up the map we're constructing. This function
- // memoizes all of its return values as it goes along.
- fn build<'a, 'b, 'cfg>(
- out: &'a mut HashMap<Unit<'b>, BuildScripts>,
- cx: &mut Context<'b, 'cfg>,
- unit: &Unit<'b>,
- ) -> CargoResult<&'a BuildScripts> {
- // Do a quick pre-flight check to see if we've already calculated the
- // set of dependencies.
- if out.contains_key(unit) {
- return Ok(&out[unit]);
- }
-
- {
- let key = unit.pkg
- .manifest()
- .links()
- .map(|l| (l.to_string(), unit.kind));
- let build_state = &cx.build_state;
- if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) {
- let key = (unit.pkg.package_id().clone(), unit.kind);
- cx.build_script_overridden.insert(key.clone());
- build_state
- .outputs
- .lock()
- .unwrap()
- .insert(key, output.clone());
- }
- }
-
- let mut ret = BuildScripts::default();
-
- if !unit.target.is_custom_build() && unit.pkg.has_custom_build() {
- add_to_link(&mut ret, unit.pkg.package_id(), unit.kind);
- }
-
- // We want to invoke the compiler deterministically to be cache-friendly
- // to rustc invocation caching schemes, so be sure to generate the same
- // set of build script dependency orderings via sorting the targets that
- // come out of the `Context`.
- let mut targets = cx.dep_targets(unit);
- targets.sort_by_key(|u| u.pkg.package_id());
-
- for unit in targets.iter() {
- let dep_scripts = build(out, cx, unit)?;
-
- if unit.target.for_host() {
- ret.plugins
- .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned());
- } else if unit.target.linkable() {
- for &(ref pkg, kind) in dep_scripts.to_link.iter() {
- add_to_link(&mut ret, pkg, kind);
- }
- }
- }
-
- match out.entry(*unit) {
- Entry::Vacant(entry) => Ok(entry.insert(ret)),
- Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"),
- }
- }
-
- // When adding an entry to 'to_link' we only actually push it on if the
- // script hasn't seen it yet (e.g. we don't push on duplicates).
- fn add_to_link(scripts: &mut BuildScripts, pkg: &PackageId, kind: Kind) {
- if scripts.seen_to_link.insert((pkg.clone(), kind)) {
- scripts.to_link.push((pkg.clone(), kind));
- }
- }
-}
+++ /dev/null
-use std::env;
-use std::fs;
-use std::hash::{self, Hasher};
-use std::path::{Path, PathBuf};
-use std::sync::{Arc, Mutex};
-
-use filetime::FileTime;
-use serde::ser::{self, Serialize};
-use serde::de::{self, Deserialize};
-use serde_json;
-
-use core::{Edition, Package, TargetKind};
-use util;
-use util::{internal, profile, Dirty, Fresh, Freshness};
-use util::errors::{CargoResult, CargoResultExt};
-use util::paths;
-
-use super::job::Work;
-use super::context::{Context, FileFlavor, Unit};
-use super::custom_build::BuildDeps;
-
-/// A tuple result of the `prepare_foo` functions in this module.
-///
-/// The first element of the triple is whether the target in question is
-/// currently fresh or not, and the second two elements are work to perform when
-/// the target is dirty or fresh, respectively.
-///
-/// Both units of work are always generated because a fresh package may still be
-/// rebuilt if some upstream dependency changes.
-pub type Preparation = (Freshness, Work, Work);
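-
-// Sketch of the intended use: the caller destructures the triple and later
-// runs exactly one of the two `Work` values depending on freshness (see
-// `Job::run` in the job module; `jobs` is an assumed `JobQueue`):
-//
-// ```ignore
-// let (freshness, dirty, fresh) = prepare_target(cx, unit)?;
-// jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?;
-// ```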
-
-/// Prepare the necessary work for the fingerprint for a specific target.
-///
-/// When dealing with fingerprints, cargo gets to choose what granularity
-/// "freshness" is considered at. One option is considering freshness at the
-/// package level. This means that if anything in a package changes, the entire
-/// package is rebuilt, unconditionally. This simplicity comes at a cost,
-/// however, in that test-only changes will cause libraries to be rebuilt, which
-/// is quite unfortunate!
-///
-/// The cost was deemed high enough that fingerprints are now calculated at the
-/// layer of a target rather than a package. Each target can then be tracked
-/// separately and rebuilt only as necessary. This requires cargo to
-/// understand what the inputs are to a target, so we drive rustc with the
-/// --dep-info flag to learn about all input files to a unit of compilation.
-///
-/// This function will calculate the fingerprint for a target and prepare the
-/// work necessary to either write the fingerprint or copy over all fresh files
-/// from the old directories to their new locations.
-pub fn prepare_target<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> CargoResult<Preparation> {
- let _p = profile::start(format!(
- "fingerprint: {} / {}",
- unit.pkg.package_id(),
- unit.target.name()
- ));
- let new = cx.files().fingerprint_dir(unit);
- let loc = new.join(&filename(cx, unit));
-
- debug!("fingerprint at: {}", loc.display());
-
- let fingerprint = calculate(cx, unit)?;
- let compare = compare_old_fingerprint(&loc, &*fingerprint);
- log_compare(unit, &compare);
-
- // If our comparison failed (e.g. we're going to trigger a rebuild of this
- // crate), then we also ensure the source of the crate passes all
- // verification checks before we build it.
- //
- // The `Source::verify` method is intended to allow sources to execute
- // pre-build checks to ensure that the relevant source code is all
- // up-to-date and as expected. This is currently used primarily for
- // directory sources which will use this hook to perform an integrity check
- // on all files in the source to ensure they haven't changed. If they have
- // changed then an error is issued.
- if compare.is_err() {
- let source_id = unit.pkg.package_id().source_id();
- let sources = cx.packages.sources();
- let source = sources
- .get(source_id)
- .ok_or_else(|| internal("missing package source"))?;
- source.verify(unit.pkg.package_id())?;
- }
-
- let root = cx.files().out_dir(unit);
- let mut missing_outputs = false;
- if unit.profile.doc {
- missing_outputs = !root.join(unit.target.crate_name())
- .join("index.html")
- .exists();
- } else {
- for output in cx.outputs(unit)?.iter() {
- if output.flavor == FileFlavor::DebugInfo {
- continue;
- }
- missing_outputs |= !output.path.exists();
- if let Some(ref link_dst) = output.hardlink {
- missing_outputs |= !link_dst.exists();
- }
- }
- }
-
- let allow_failure = unit.profile.rustc_args.is_some();
- let target_root = cx.files().target_root().to_path_buf();
- let write_fingerprint = Work::new(move |_| {
- match fingerprint.update_local(&target_root) {
- Ok(()) => {}
- Err(..) if allow_failure => return Ok(()),
- Err(e) => return Err(e),
- }
- write_fingerprint(&loc, &*fingerprint)
- });
-
- let fresh = compare.is_ok() && !missing_outputs;
- Ok((
- if fresh { Fresh } else { Dirty },
- write_fingerprint,
- Work::noop(),
- ))
-}
-
-/// A fingerprint can be considered to be a "short string" representing the
-/// state of a world for a package.
-///
-/// If a fingerprint ever changes, then the package itself needs to be
-/// recompiled. Inputs to the fingerprint include source code modifications,
-/// compiler flags, compiler version, etc. This structure is not simply a
-/// `String` due to the fact that some fingerprints cannot be calculated lazily.
-///
-/// Path sources, for example, use the mtime of the corresponding dep-info file
-/// as a fingerprint (all source files must be modified *before* this mtime).
-/// This dep-info file is not generated, however, until after the crate is
-/// compiled. As a result, this structure can be thought of as a fingerprint
-/// to-be. The actual value can be calculated via `hash()`, but the operation
-/// may fail as some files may not have been generated.
-///
-/// Note that dependencies are taken into account for fingerprints because rustc
-/// requires that whenever an upstream crate is recompiled that all downstream
-/// dependants are also recompiled. This is typically tracked through
-/// `DependencyQueue`, but it also needs to be retained here because Cargo can
-/// be interrupted while executing, losing the state of the `DependencyQueue`
-/// graph.
-#[derive(Serialize, Deserialize)]
-pub struct Fingerprint {
- rustc: u64,
- features: String,
- target: u64,
- profile: u64,
- path: u64,
- #[serde(serialize_with = "serialize_deps", deserialize_with = "deserialize_deps")]
- deps: Vec<(String, Arc<Fingerprint>)>,
- local: Vec<LocalFingerprint>,
- #[serde(skip_serializing, skip_deserializing)]
- memoized_hash: Mutex<Option<u64>>,
- rustflags: Vec<String>,
- edition: Edition,
-}
-
-fn serialize_deps<S>(deps: &[(String, Arc<Fingerprint>)], ser: S) -> Result<S::Ok, S::Error>
-where
- S: ser::Serializer,
-{
- deps.iter()
- .map(|&(ref a, ref b)| (a, b.hash()))
- .collect::<Vec<_>>()
- .serialize(ser)
-}
-
-fn deserialize_deps<'de, D>(d: D) -> Result<Vec<(String, Arc<Fingerprint>)>, D::Error>
-where
- D: de::Deserializer<'de>,
-{
- let decoded = <Vec<(String, u64)>>::deserialize(d)?;
- Ok(decoded
- .into_iter()
- .map(|(name, hash)| {
- (
- name,
- Arc::new(Fingerprint {
- rustc: 0,
- target: 0,
- profile: 0,
- path: 0,
- local: vec![LocalFingerprint::Precalculated(String::new())],
- features: String::new(),
- deps: Vec::new(),
- memoized_hash: Mutex::new(Some(hash)),
- edition: Edition::Edition2015,
- rustflags: Vec::new(),
- }),
- )
- })
- .collect())
-}
-
-#[derive(Serialize, Deserialize, Hash)]
-enum LocalFingerprint {
- Precalculated(String),
- MtimeBased(MtimeSlot, PathBuf),
- EnvBased(String, Option<String>),
-}
-
-impl LocalFingerprint {
- fn mtime(root: &Path, mtime: Option<FileTime>, path: &Path) -> LocalFingerprint {
- let mtime = MtimeSlot(Mutex::new(mtime));
- assert!(path.is_absolute());
- let path = path.strip_prefix(root).unwrap_or(path);
- LocalFingerprint::MtimeBased(mtime, path.to_path_buf())
- }
-}
-
-struct MtimeSlot(Mutex<Option<FileTime>>);
-
-impl Fingerprint {
- fn update_local(&self, root: &Path) -> CargoResult<()> {
- let mut hash_busted = false;
- for local in self.local.iter() {
- match *local {
- LocalFingerprint::MtimeBased(ref slot, ref path) => {
- let path = root.join(path);
- let meta = fs::metadata(&path)
- .chain_err(|| internal(format!("failed to stat `{}`", path.display())))?;
- let mtime = FileTime::from_last_modification_time(&meta);
- *slot.0.lock().unwrap() = Some(mtime);
- }
- LocalFingerprint::EnvBased(..) | LocalFingerprint::Precalculated(..) => continue,
- }
- hash_busted = true;
- }
-
- if hash_busted {
- *self.memoized_hash.lock().unwrap() = None;
- }
- Ok(())
- }
-
- fn hash(&self) -> u64 {
- if let Some(s) = *self.memoized_hash.lock().unwrap() {
- return s;
- }
- let ret = util::hash_u64(self);
- *self.memoized_hash.lock().unwrap() = Some(ret);
- ret
- }
-
- fn compare(&self, old: &Fingerprint) -> CargoResult<()> {
- if self.rustc != old.rustc {
- bail!("rust compiler has changed")
- }
- if self.features != old.features {
- bail!(
- "features have changed: {} != {}",
- self.features,
- old.features
- )
- }
- if self.target != old.target {
- bail!("target configuration has changed")
- }
- if self.path != old.path {
- bail!("path to the compiler has changed")
- }
- if self.profile != old.profile {
- bail!("profile configuration has changed")
- }
- if self.rustflags != old.rustflags {
- bail!("RUSTFLAGS has changed")
- }
- if self.local.len() != old.local.len() {
- bail!("local lens changed");
- }
- if self.edition != old.edition {
- bail!("edition changed")
- }
- for (new, old) in self.local.iter().zip(&old.local) {
- match (new, old) {
- (
- &LocalFingerprint::Precalculated(ref a),
- &LocalFingerprint::Precalculated(ref b),
- ) => {
- if a != b {
- bail!("precalculated components have changed: {} != {}", a, b)
- }
- }
- (
- &LocalFingerprint::MtimeBased(ref on_disk_mtime, ref ap),
- &LocalFingerprint::MtimeBased(ref previously_built_mtime, ref bp),
- ) => {
- let on_disk_mtime = on_disk_mtime.0.lock().unwrap();
- let previously_built_mtime = previously_built_mtime.0.lock().unwrap();
-
- let should_rebuild = match (*on_disk_mtime, *previously_built_mtime) {
- (None, None) => false,
- (Some(_), None) | (None, Some(_)) => true,
- (Some(on_disk), Some(previously_built)) => on_disk > previously_built,
- };
-
- if should_rebuild {
- bail!(
- "mtime based components have changed: previously {:?} now {:?}, \
- paths are {:?} and {:?}",
- *previously_built_mtime,
- *on_disk_mtime,
- ap,
- bp
- )
- }
- }
- (
- &LocalFingerprint::EnvBased(ref akey, ref avalue),
- &LocalFingerprint::EnvBased(ref bkey, ref bvalue),
- ) => {
- if *akey != *bkey {
- bail!("env vars changed: {} != {}", akey, bkey);
- }
- if *avalue != *bvalue {
- bail!(
- "env var `{}` changed: previously {:?} now {:?}",
- akey,
- bvalue,
- avalue
- )
- }
- }
- _ => bail!("local fingerprint type has changed"),
- }
- }
-
- if self.deps.len() != old.deps.len() {
- bail!("number of dependencies has changed")
- }
- for (a, b) in self.deps.iter().zip(old.deps.iter()) {
- if a.1.hash() != b.1.hash() {
- bail!("new ({}) != old ({})", a.0, b.0)
- }
- }
- Ok(())
- }
-}
-
-impl hash::Hash for Fingerprint {
- fn hash<H: Hasher>(&self, h: &mut H) {
- let Fingerprint {
- rustc,
- ref features,
- target,
- path,
- profile,
- ref deps,
- ref local,
- edition,
- ref rustflags,
- ..
- } = *self;
- (
- rustc,
- features,
- target,
- path,
- profile,
- local,
- edition,
- rustflags,
- ).hash(h);
-
- h.write_usize(deps.len());
- for &(ref name, ref fingerprint) in deps {
- name.hash(h);
- // use memoized dep hashes to avoid exponential blowup
- h.write_u64(Fingerprint::hash(fingerprint));
- }
- }
-}
-
-impl hash::Hash for MtimeSlot {
- fn hash<H: Hasher>(&self, h: &mut H) {
- self.0.lock().unwrap().hash(h)
- }
-}
-
-impl ser::Serialize for MtimeSlot {
- fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
- where
- S: ser::Serializer,
- {
- self.0
- .lock()
- .unwrap()
- .map(|ft| (ft.seconds_relative_to_1970(), ft.nanoseconds()))
- .serialize(s)
- }
-}
-
-impl<'de> de::Deserialize<'de> for MtimeSlot {
- fn deserialize<D>(d: D) -> Result<MtimeSlot, D::Error>
- where
- D: de::Deserializer<'de>,
- {
- let kind: Option<(u64, u32)> = de::Deserialize::deserialize(d)?;
- Ok(MtimeSlot(Mutex::new(kind.map(|(s, n)| {
- FileTime::from_seconds_since_1970(s, n)
- }))))
- }
-}
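-
-// For reference, a populated `MtimeSlot` serializes as a `(seconds, nanos)`
-// pair, e.g. `[1523459790, 123456789]` in the fingerprint JSON (invented
-// values); an empty slot serializes as `null`.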
-
-/// Calculates the fingerprint for a package/target pair.
-///
-/// This fingerprint is used by Cargo to detect situations such as:
-///
-/// * A non-path package changes (changes version, changes revision, etc).
-/// * Any dependency changes
-/// * The compiler changes
-/// * The set of features a package is built with changes
-/// * The profile a target is compiled with changes (e.g. opt-level changes)
-///
-/// Information like file modification time is only calculated for path
-/// dependencies and is calculated in `calculate_target_fresh`.
-fn calculate<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> CargoResult<Arc<Fingerprint>> {
- if let Some(s) = cx.fingerprints.get(unit) {
- return Ok(Arc::clone(s));
- }
-
- // Next, recursively calculate the fingerprint for all of our dependencies.
- //
- // Skip the fingerprints of build scripts as they may not always be
- // available and the dirtiness propagation for modification is tracked
- // elsewhere. Also skip fingerprints of binaries because they don't actually
- // induce a recompile, they're just dependencies in the sense that they need
- // to be built.
- let deps = cx.dep_targets(unit);
- let deps = deps.iter()
- .filter(|u| !u.target.is_custom_build() && !u.target.is_bin())
- .map(|unit| {
- calculate(cx, unit).map(|fingerprint| (unit.pkg.package_id().to_string(), fingerprint))
- })
- .collect::<CargoResult<Vec<_>>>()?;
-
- // And finally, calculate what our own local fingerprint is
- let local = if use_dep_info(unit) {
- let dep_info = dep_info_loc(cx, unit);
- let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?;
- LocalFingerprint::mtime(cx.files().target_root(), mtime, &dep_info)
- } else {
- let fingerprint = pkg_fingerprint(cx, unit.pkg)?;
- LocalFingerprint::Precalculated(fingerprint)
- };
- let mut deps = deps;
- deps.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b));
- let extra_flags = if unit.profile.doc {
- cx.rustdocflags_args(unit)?
- } else {
- cx.rustflags_args(unit)?
- };
- let fingerprint = Arc::new(Fingerprint {
- rustc: util::hash_u64(&cx.config.rustc()?.verbose_version),
- target: util::hash_u64(&unit.target),
- profile: util::hash_u64(&(&unit.profile, cx.incremental_args(unit)?)),
- // Note that .0 is hashed here, not .1 which is the cwd. That doesn't
- // actually affect the output artifact so there's no need to hash it.
- path: util::hash_u64(&super::path_args(cx, unit).0),
- features: format!("{:?}", cx.resolve.features_sorted(unit.pkg.package_id())),
- deps,
- local: vec![local],
- memoized_hash: Mutex::new(None),
- edition: unit.pkg.manifest().edition(),
- rustflags: extra_flags,
- });
- cx.fingerprints.insert(*unit, Arc::clone(&fingerprint));
- Ok(fingerprint)
-}
-
-// We want to use the mtime for files if we're a path source, but if we're a
-// git/registry source, then the mtime of files may fluctuate, but they won't
-// change so long as the source itself remains constant (which is the
-// responsibility of the source)
-fn use_dep_info(unit: &Unit) -> bool {
- let path = unit.pkg.summary().source_id().is_path();
- !unit.profile.doc && path
-}
-
-/// Prepare the necessary work for the fingerprint of a build command.
-///
-/// Build commands are located on packages, not on targets. Additionally, we
-/// don't have --dep-info to drive calculation of the fingerprint of a build
-/// command. This brings up an interesting predicament which gives us a few
-/// options to figure out whether a build command is dirty or not:
-///
-/// 1. A build command is dirty if *any* file in a package changes. In theory
-/// all files are candidate for being used by the build command.
-/// 2. A build command is dirty if any file in a *specific directory* changes.
-/// This may lose information as it may require files outside of the specific
-/// directory.
-/// 3. A build command must itself provide a dep-info-like file stating how it
-/// should be considered dirty or not.
-///
-/// The currently implemented solution is option (1), although it is planned to
-/// migrate to option (2) in the near future.
-pub fn prepare_build_cmd<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> CargoResult<Preparation> {
- let _p = profile::start(format!("fingerprint build cmd: {}", unit.pkg.package_id()));
- let new = cx.files().fingerprint_dir(unit);
- let loc = new.join("build");
-
- debug!("fingerprint at: {}", loc.display());
-
- let (local, output_path) = build_script_local_fingerprints(cx, unit)?;
- let mut fingerprint = Fingerprint {
- rustc: 0,
- target: 0,
- profile: 0,
- path: 0,
- features: String::new(),
- deps: Vec::new(),
- local,
- memoized_hash: Mutex::new(None),
- edition: Edition::Edition2015,
- rustflags: Vec::new(),
- };
- let compare = compare_old_fingerprint(&loc, &fingerprint);
- log_compare(unit, &compare);
-
- // When we write out the fingerprint, we may want to actually change the
- // kind of fingerprint being recorded. If the build script had never run
- // before (or listed no dependencies on its previous run), we started out
- // with the `Precalculated` variant based on `pkg_fingerprint`. If the
- // build script then prints `rerun-if-changed`, however, we need to record
- // what's necessary for that fingerprint.
- //
- // Hence, if there were some `rerun-if-changed` directives, forcibly
- // change the kind of fingerprint by reinterpreting the dependencies
- // output by the build script.
- let state = Arc::clone(&cx.build_state);
- let key = (unit.pkg.package_id().clone(), unit.kind);
- let pkg_root = unit.pkg.root().to_path_buf();
- let target_root = cx.files().target_root().to_path_buf();
- let write_fingerprint = Work::new(move |_| {
- if let Some(output_path) = output_path {
- let outputs = state.outputs.lock().unwrap();
- let outputs = &outputs[&key];
- if !outputs.rerun_if_changed.is_empty() || !outputs.rerun_if_env_changed.is_empty() {
- let deps = BuildDeps::new(&output_path, Some(outputs));
- fingerprint.local = local_fingerprints_deps(&deps, &target_root, &pkg_root);
- fingerprint.update_local(&target_root)?;
- }
- }
- write_fingerprint(&loc, &fingerprint)
- });
-
- Ok((
- if compare.is_ok() { Fresh } else { Dirty },
- write_fingerprint,
- Work::noop(),
- ))
-}
-
-fn build_script_local_fingerprints<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> CargoResult<(Vec<LocalFingerprint>, Option<PathBuf>)> {
- let state = cx.build_state.outputs.lock().unwrap();
- // First up, if this build script is entirely overridden, then we just
- // return the hash of what we overrode it with.
- //
- // Note that the `None` here means that we don't want to update the local
- // fingerprint afterwards because this is all just overridden.
- if let Some(output) = state.get(&(unit.pkg.package_id().clone(), unit.kind)) {
- debug!("override local fingerprints deps");
- let s = format!(
- "overridden build state with hash: {}",
- util::hash_u64(output)
- );
- return Ok((vec![LocalFingerprint::Precalculated(s)], None));
- }
-
- // Next up we look at the previously listed dependencies for the build
- // script. If there are none then we're in the "old mode" where we just
- // assume that we're dirty if anything in the package changed. The
- // `Some` here though means that we want to update our local fingerprints
- // after we're done as running this build script may have created more
- // dependencies.
- let deps = &cx.build_explicit_deps[unit];
- let output = deps.build_script_output.clone();
- if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
- debug!("old local fingerprints deps");
- let s = pkg_fingerprint(cx, unit.pkg)?;
- return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output)));
- }
-
- // Ok so now we're in "new mode" where we can have files listed as
- // dependencies as well as env vars listed as dependencies. Process them all
- // here.
- Ok((
- local_fingerprints_deps(deps, cx.files().target_root(), unit.pkg.root()),
- Some(output),
- ))
-}
-
-fn local_fingerprints_deps(
- deps: &BuildDeps,
- target_root: &Path,
- pkg_root: &Path,
-) -> Vec<LocalFingerprint> {
- debug!("new local fingerprints deps");
- let mut local = Vec::new();
- if !deps.rerun_if_changed.is_empty() {
- let output = &deps.build_script_output;
- let deps = deps.rerun_if_changed.iter().map(|p| pkg_root.join(p));
- let mtime = mtime_if_fresh(output, deps);
- local.push(LocalFingerprint::mtime(target_root, mtime, output));
- }
-
- for var in deps.rerun_if_env_changed.iter() {
- let val = env::var(var).ok();
- local.push(LocalFingerprint::EnvBased(var.clone(), val));
- }
-
- local
-}
-
-fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> {
- let hash = fingerprint.hash();
- debug!("write fingerprint: {}", loc.display());
- paths::write(loc, util::to_hex(hash).as_bytes())?;
- paths::write(
- &loc.with_extension("json"),
- &serde_json::to_vec(&fingerprint).unwrap(),
- )?;
- Ok(())
-}
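-
-// On disk this produces two sibling files per fingerprint: `loc` holding the
-// hex-encoded hash (e.g. `17f8b50b0b0e961d`, an invented value) and a `.json`
-// file holding the full serialized `Fingerprint`, which is what
-// `compare_old_fingerprint` below uses to explain mismatches.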
-
-/// Prepare for work when a package starts to build
-pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<()> {
- let new1 = cx.files().fingerprint_dir(unit);
-
- if fs::metadata(&new1).is_err() {
- fs::create_dir(&new1)?;
- }
-
- Ok(())
-}
-
-pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf {
- cx.files()
- .fingerprint_dir(unit)
- .join(&format!("dep-{}", filename(cx, unit)))
-}
-
-fn compare_old_fingerprint(loc: &Path, new_fingerprint: &Fingerprint) -> CargoResult<()> {
- let old_fingerprint_short = paths::read(loc)?;
- let new_hash = new_fingerprint.hash();
-
- if util::to_hex(new_hash) == old_fingerprint_short {
- return Ok(());
- }
-
- let old_fingerprint_json = paths::read(&loc.with_extension("json"))?;
- let old_fingerprint = serde_json::from_str(&old_fingerprint_json)
- .chain_err(|| internal("failed to deserialize json"))?;
- new_fingerprint.compare(&old_fingerprint)
-}
-
-fn log_compare(unit: &Unit, compare: &CargoResult<()>) {
- let ce = match *compare {
- Ok(..) => return,
- Err(ref e) => e,
- };
- info!("fingerprint error for {}: {}", unit.pkg, ce);
-
- for cause in ce.causes().skip(1) {
- info!(" cause: {}", cause);
- }
-}
-
-// Parse the dep-info into a list of paths
-pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
- let data = match paths::read_bytes(dep_info) {
- Ok(data) => data,
- Err(_) => return Ok(None),
- };
- let paths = data.split(|&x| x == 0)
- .filter(|x| !x.is_empty())
- .map(|p| util::bytes2path(p).map(|p| pkg.root().join(p)))
- .collect::<Result<Vec<_>, _>>()?;
- if paths.is_empty() {
- Ok(None)
- } else {
- Ok(Some(paths))
- }
-}
-
-fn dep_info_mtime_if_fresh(pkg: &Package, dep_info: &Path) -> CargoResult<Option<FileTime>> {
- if let Some(paths) = parse_dep_info(pkg, dep_info)? {
- Ok(mtime_if_fresh(dep_info, paths.iter()))
- } else {
- Ok(None)
- }
-}
-
-fn pkg_fingerprint(cx: &Context, pkg: &Package) -> CargoResult<String> {
- let source_id = pkg.package_id().source_id();
- let sources = cx.packages.sources();
-
- let source = sources
- .get(source_id)
- .ok_or_else(|| internal("missing package source"))?;
- source.fingerprint(pkg)
-}
-
-fn mtime_if_fresh<I>(output: &Path, paths: I) -> Option<FileTime>
-where
- I: IntoIterator,
- I::Item: AsRef<Path>,
-{
- let meta = match fs::metadata(output) {
- Ok(meta) => meta,
- Err(..) => return None,
- };
- let mtime = FileTime::from_last_modification_time(&meta);
-
- let any_stale = paths.into_iter().any(|path| {
- let path = path.as_ref();
- let meta = match fs::metadata(path) {
- Ok(meta) => meta,
- Err(..) => {
- info!("stale: {} -- missing", path.display());
- return true;
- }
- };
- let mtime2 = FileTime::from_last_modification_time(&meta);
- if mtime2 > mtime {
- info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime);
- true
- } else {
- false
- }
- });
-
- if any_stale {
- None
- } else {
- Some(mtime)
- }
-}
-
-fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String {
- // file_stem includes metadata hash. Thus we have a different
- // fingerprint for every metadata hash version. This works because
- // even if the package is fresh, we'll still link the fresh target
- let file_stem = cx.files().file_stem(unit);
- let kind = match *unit.target.kind() {
- TargetKind::Lib(..) => "lib",
- TargetKind::Bin => "bin",
- TargetKind::Test => "integration-test",
- TargetKind::ExampleBin | TargetKind::ExampleLib(..) => "example",
- TargetKind::Bench => "bench",
- TargetKind::CustomBuild => "build-script",
- };
- let flavor = if unit.profile.test {
- "test-"
- } else if unit.profile.doc {
- "doc-"
- } else {
- ""
- };
- format!("{}{}-{}", flavor, kind, file_stem)
-}
-
-/// Parses the dep-info file coming out of rustc into a Cargo-specific format.
-///
-/// This function will parse `rustc_dep_info` as a makefile-style dep info to
-/// learn about all the files which a crate depends on. This is then
-/// re-serialized into the `cargo_dep_info` path in a Cargo-specific format.
-///
-/// The `pkg_root` argument here is the absolute path to the directory
-/// containing `Cargo.toml` for this crate that was compiled. The paths listed
-/// in the rustc dep-info file may or may not be absolute but we'll want to
-/// consider all of them relative to the `root` specified.
-///
-/// The `rustc_cwd` argument is the absolute path to the cwd of the compiler
-/// when it was invoked.
-///
-/// The serialized Cargo format will contain a list of files, all of which are
-/// relative if they're under `root`, or absolute if they're elsewhere.
-pub fn translate_dep_info(
- rustc_dep_info: &Path,
- cargo_dep_info: &Path,
- pkg_root: &Path,
- rustc_cwd: &Path,
-) -> CargoResult<()> {
- let target = parse_rustc_dep_info(rustc_dep_info)?;
- let deps = &target
- .get(0)
- .ok_or_else(|| internal("malformed dep-info format, no targets".to_string()))?
- .1;
-
- let mut new_contents = Vec::new();
- for file in deps {
- let absolute = rustc_cwd.join(file);
- let path = absolute.strip_prefix(pkg_root).unwrap_or(&absolute);
- new_contents.extend(util::path2bytes(path)?);
- new_contents.push(0);
- }
- paths::write(cargo_dep_info, &new_contents)?;
- Ok(())
-}
-
-pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult<Vec<(String, Vec<String>)>> {
- let contents = paths::read(rustc_dep_info)?;
- contents
- .lines()
- .filter_map(|l| l.find(": ").map(|i| (l, i)))
- .map(|(line, pos)| {
- let target = &line[..pos];
- let mut deps = line[pos + 2..].split_whitespace();
-
- let mut ret = Vec::new();
- while let Some(s) = deps.next() {
- let mut file = s.to_string();
- while file.ends_with('\\') {
- file.pop();
- file.push(' ');
- file.push_str(deps.next().ok_or_else(|| {
- internal("malformed dep-info format, trailing \\".to_string())
- })?);
- }
- ret.push(file);
- }
- Ok((target.to_string(), ret))
- })
- .collect()
-}
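-
-// Sketch of the makefile-style input handled above (invented paths): a line
-// `target/debug/foo: src/lib.rs src/has\ space.rs` parses to
-// `("target/debug/foo", vec!["src/lib.rs", "src/has space.rs"])`; note the
-// trailing-backslash handling that re-joins escaped spaces.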
+++ /dev/null
-use std::fmt;
-
-use util::{CargoResult, Dirty, Fresh, Freshness};
-use super::job_queue::JobState;
-
-pub struct Job {
- dirty: Work,
- fresh: Work,
-}
-
-/// Each job should send its description (via `JobState`) before starting;
-/// it should send it either exactly once or not at all.
-pub struct Work {
- inner: Box<for<'a, 'b> FnBox<&'a JobState<'b>, CargoResult<()>> + Send>,
-}
-
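-// `FnBox` is a workaround from before `Box<dyn FnOnce>` was directly callable
-// on stable Rust: `call_box` takes the `Box<Self>` by value and consumes it
-// to invoke the closure exactly once.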
-trait FnBox<A, R> {
- fn call_box(self: Box<Self>, a: A) -> R;
-}
-
-impl<A, R, F: FnOnce(A) -> R> FnBox<A, R> for F {
- fn call_box(self: Box<F>, a: A) -> R {
- (*self)(a)
- }
-}
-
-impl Work {
- pub fn new<F>(f: F) -> Work
- where
- F: FnOnce(&JobState) -> CargoResult<()> + Send + 'static,
- {
- Work { inner: Box::new(f) }
- }
-
- pub fn noop() -> Work {
- Work::new(|_| Ok(()))
- }
-
- pub fn call(self, tx: &JobState) -> CargoResult<()> {
- self.inner.call_box(tx)
- }
-
- pub fn then(self, next: Work) -> Work {
- Work::new(move |state| {
- self.call(state)?;
- next.call(state)
- })
- }
-}
-
-impl Job {
- /// Create a new job representing a unit of work.
- pub fn new(dirty: Work, fresh: Work) -> Job {
- Job { dirty, fresh }
- }
-
- /// Consumes this job by running it, returning the result of the
- /// computation.
- pub fn run(self, fresh: Freshness, state: &JobState) -> CargoResult<()> {
- match fresh {
- Fresh => self.fresh.call(state),
- Dirty => self.dirty.call(state),
- }
- }
-}
-
-impl fmt::Debug for Job {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(f, "Job {{ ... }}")
- }
-}
+++ /dev/null
-use std::collections::HashSet;
-use std::collections::hash_map::HashMap;
-use std::fmt;
-use std::io;
-use std::mem;
-use std::sync::mpsc::{channel, Receiver, Sender};
-
-use crossbeam::{self, Scope};
-use jobserver::{Acquired, HelperThread};
-
-use core::{PackageId, Profile, Target};
-use util::{Config, DependencyQueue, Dirty, Fresh, Freshness};
-use util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder};
-use handle_error;
-
-use super::{Context, Kind, Unit};
-use super::job::Job;
-
-/// A management structure of the entire dependency graph to compile.
-///
-/// This structure is backed by the `DependencyQueue` type and manages the
-/// actual compilation step of each package. Packages enqueue units of work and
-/// then later on the entire graph is processed and compiled.
-pub struct JobQueue<'a> {
- queue: DependencyQueue<Key<'a>, Vec<(Job, Freshness)>>,
- tx: Sender<Message<'a>>,
- rx: Receiver<Message<'a>>,
- active: usize,
- pending: HashMap<Key<'a>, PendingBuild>,
- compiled: HashSet<&'a PackageId>,
- documented: HashSet<&'a PackageId>,
- counts: HashMap<&'a PackageId, usize>,
- is_release: bool,
-}
-
-/// A helper structure for metadata about the state of a building package.
-struct PendingBuild {
- /// Number of jobs currently active
- amt: usize,
- /// Current freshness state of this package. Any dirty target within a
- /// package will cause the entire package to become dirty.
- fresh: Freshness,
-}
-
-#[derive(Clone, Copy, Eq, PartialEq, Hash)]
-struct Key<'a> {
- pkg: &'a PackageId,
- target: &'a Target,
- profile: &'a Profile,
- kind: Kind,
-}
-
-pub struct JobState<'a> {
- tx: Sender<Message<'a>>,
-}
-
-enum Message<'a> {
- Run(String),
- Stdout(String),
- Stderr(String),
- Token(io::Result<Acquired>),
- Finish(Key<'a>, CargoResult<()>),
-}
-
-impl<'a> JobState<'a> {
- pub fn running(&self, cmd: &ProcessBuilder) {
- let _ = self.tx.send(Message::Run(cmd.to_string()));
- }
-
- pub fn stdout(&self, out: &str) {
- let _ = self.tx.send(Message::Stdout(out.to_string()));
- }
-
- pub fn stderr(&self, err: &str) {
- let _ = self.tx.send(Message::Stderr(err.to_string()));
- }
-}
-
-impl<'a> JobQueue<'a> {
- pub fn new<'cfg>(cx: &Context<'a, 'cfg>) -> JobQueue<'a> {
- let (tx, rx) = channel();
- JobQueue {
- queue: DependencyQueue::new(),
- tx,
- rx,
- active: 0,
- pending: HashMap::new(),
- compiled: HashSet::new(),
- documented: HashSet::new(),
- counts: HashMap::new(),
- is_release: cx.build_config.release,
- }
- }
-
- pub fn enqueue<'cfg>(
- &mut self,
- cx: &Context<'a, 'cfg>,
- unit: &Unit<'a>,
- job: Job,
- fresh: Freshness,
- ) -> CargoResult<()> {
- let key = Key::new(unit);
- let deps = key.dependencies(cx)?;
- self.queue
- .queue(Fresh, key, Vec::new(), &deps)
- .push((job, fresh));
- *self.counts.entry(key.pkg).or_insert(0) += 1;
- Ok(())
- }
-
- /// Execute all jobs necessary to build the dependency graph.
- ///
- /// This function will spawn off `config.jobs()` workers to build all of the
- /// necessary dependencies, in order. Freshness is propagated as far as
- /// possible along each dependency chain.
- pub fn execute(&mut self, cx: &mut Context) -> CargoResult<()> {
- let _p = profile::start("executing the job graph");
- self.queue.queue_finished();
-
- // We need to give a handle to the send half of our message queue to the
- // jobserver helper thread. Unfortunately though we need the handle to be
- // `'static` as that's typically what's required when spawning a
- // thread!
- //
- // To work around this we transmute the `Sender` to a static lifetime;
- // we're only sending "longer living" messages and we should also
- // destroy all references to the channel before this function exits as
- // the destructor for the `helper` object will ensure the associated
- // thread is no longer running.
- //
- // As a result, this `transmute` to a longer lifetime should be safe in
- // practice.
- let tx = self.tx.clone();
- let tx = unsafe { mem::transmute::<Sender<Message<'a>>, Sender<Message<'static>>>(tx) };
- let helper = cx.jobserver
- .clone()
- .into_helper_thread(move |token| {
- drop(tx.send(Message::Token(token)));
- })
- .chain_err(|| "failed to create helper thread for jobserver management")?;
-
- crossbeam::scope(|scope| self.drain_the_queue(cx, scope, &helper))
- }
-
- fn drain_the_queue(
- &mut self,
- cx: &mut Context,
- scope: &Scope<'a>,
- jobserver_helper: &HelperThread,
- ) -> CargoResult<()> {
- let mut tokens = Vec::new();
- let mut queue = Vec::new();
- trace!("queue: {:#?}", self.queue);
-
- // Iteratively execute the entire dependency graph. Each turn of the
- // loop starts out by scheduling as much work as possible (up to the
- // maximum number of parallel jobs we have tokens for). A local queue
- // is maintained separately from the main dependency queue as one
- // dequeue may actually dequeue quite a bit of work (e.g. 10 binaries
- // in one project).
- //
- // After a job has finished we update our internal state if it was
- // successful and otherwise wait for pending work to finish if it failed
- // and then immediately return.
- let mut error = None;
- loop {
- // Dequeue as much work as we can, learning about everything
- // possible that can run. Note that this is also the point where we
- // start requesting job tokens. Each job after the first needs to
- // request a token.
- while let Some((fresh, key, jobs)) = self.queue.dequeue() {
- let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| f.combine(fresh));
- self.pending.insert(
- key,
- PendingBuild {
- amt: jobs.len(),
- fresh: total_fresh,
- },
- );
- for (job, f) in jobs {
- queue.push((key, job, f.combine(fresh)));
- if self.active + queue.len() > 1 {
- jobserver_helper.request_token();
- }
- }
- }
-
- // Now that we've learned of all possible work that we can execute
- // try to spawn it so long as we've got a jobserver token which says
- // we're able to perform some parallel work.
- while error.is_none() && self.active < tokens.len() + 1 && !queue.is_empty() {
- let (key, job, fresh) = queue.remove(0);
- self.run(key, fresh, job, cx.config, scope)?;
- }
-
- // If after all that we're not actually running anything then we're
- // done!
- if self.active == 0 {
- break;
- }
-
- // And finally, before we block waiting for the next event, drop any
- // excess tokens we may have accidentally acquired. Due to how our
- // jobserver interface is architected we may acquire a token that we
- // don't actually use, and if this happens just relinquish it back
- // to the jobserver itself.
- tokens.truncate(self.active - 1);
-
- match self.rx.recv().unwrap() {
- Message::Run(cmd) => {
- cx.config.shell().verbose(|c| c.status("Running", &cmd))?;
- }
- Message::Stdout(out) => {
- if cx.config.extra_verbose() {
- println!("{}", out);
- }
- }
- Message::Stderr(err) => {
- if cx.config.extra_verbose() {
- writeln!(cx.config.shell().err(), "{}", err)?;
- }
- }
- Message::Finish(key, result) => {
- info!("end: {:?}", key);
- self.active -= 1;
- if self.active > 0 {
- assert!(!tokens.is_empty());
- drop(tokens.pop());
- }
- match result {
- Ok(()) => self.finish(key, cx)?,
- Err(e) => {
- let msg = "The following warnings were emitted during compilation:";
- self.emit_warnings(Some(msg), key, cx)?;
-
- if self.active > 0 {
- error = Some(format_err!("build failed"));
- handle_error(e, &mut *cx.config.shell());
- cx.config.shell().warn(
- "build failed, waiting for other \
- jobs to finish...",
- )?;
- } else {
- error = Some(e);
- }
- }
- }
- }
- Message::Token(acquired_token) => {
- tokens.push(acquired_token.chain_err(|| "failed to acquire jobserver token")?);
- }
- }
- }
-
- let build_type = if self.is_release { "release" } else { "dev" };
- let profile = cx.lib_profile();
- let mut opt_type = String::from(if profile.opt_level == "0" {
- "unoptimized"
- } else {
- "optimized"
- });
- if profile.debuginfo.is_some() {
- opt_type += " + debuginfo";
- }
- let duration = cx.config.creation_time().elapsed();
- let time_elapsed = format!(
- "{}.{1:.2} secs",
- duration.as_secs(),
- duration.subsec_nanos() / 10_000_000
- );
- if self.queue.is_empty() {
- let message = format!(
- "{} [{}] target(s) in {}",
- build_type, opt_type, time_elapsed
- );
- cx.config.shell().status("Finished", message)?;
- Ok(())
- } else if let Some(e) = error {
- Err(e)
- } else {
- debug!("queue: {:#?}", self.queue);
- Err(internal("finished with jobs still left in the queue"))
- }
- }
-
- /// Executes a job in the `scope` given, spawning a thread for the job if
- /// it's dirty; fresh jobs run inline on the current thread.
- fn run(
- &mut self,
- key: Key<'a>,
- fresh: Freshness,
- job: Job,
- config: &Config,
- scope: &Scope<'a>,
- ) -> CargoResult<()> {
- info!("start: {:?}", key);
-
- self.active += 1;
- *self.counts.get_mut(key.pkg).unwrap() -= 1;
-
- let my_tx = self.tx.clone();
- let doit = move || {
- let res = job.run(fresh, &JobState { tx: my_tx.clone() });
- my_tx.send(Message::Finish(key, res)).unwrap();
- };
- match fresh {
- Freshness::Fresh => doit(),
- Freshness::Dirty => {
- scope.spawn(doit);
- }
- }
-
- // Print out some nice progress information
- self.note_working_on(config, &key, fresh)?;
-
- Ok(())
- }
-
- fn emit_warnings(&self, msg: Option<&str>, key: Key<'a>, cx: &mut Context) -> CargoResult<()> {
- let output = cx.build_state.outputs.lock().unwrap();
- if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) {
- if let Some(msg) = msg {
- if !output.warnings.is_empty() {
- writeln!(cx.config.shell().err(), "{}\n", msg)?;
- }
- }
-
- for warning in output.warnings.iter() {
- cx.config.shell().warn(warning)?;
- }
-
- if !output.warnings.is_empty() && msg.is_some() {
- // Output an empty line.
- writeln!(cx.config.shell().err(), "")?;
- }
- }
-
- Ok(())
- }
-
- fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> {
- if key.profile.run_custom_build && cx.show_warnings(key.pkg) {
- self.emit_warnings(None, key, cx)?;
- }
-
- let state = self.pending.get_mut(&key).unwrap();
- state.amt -= 1;
- if state.amt == 0 {
- self.queue.finish(&key, state.fresh);
- }
- Ok(())
- }
-
- // This isn't super trivial because we don't want to print loads and
- // loads of information to the console, but we also want to produce a
- // faithful representation of what's happening. This is somewhat nuanced
- // as a package can start compiling *very* early on because of custom
- // build commands and such.
- //
- // In general, we try to print "Compiling" for the first nontrivial task
- // run for a package, regardless of when that is. We then don't print
- // out any more information for a package after we've printed it once.
- fn note_working_on(
- &mut self,
- config: &Config,
- key: &Key<'a>,
- fresh: Freshness,
- ) -> CargoResult<()> {
- if (self.compiled.contains(key.pkg) && !key.profile.doc)
- || (self.documented.contains(key.pkg) && key.profile.doc)
- {
- return Ok(());
- }
-
- match fresh {
- // Any dirty stage which runs at least one command gets printed as
- // being a compiled package
- Dirty => {
- if key.profile.doc {
- if !key.profile.test {
- self.documented.insert(key.pkg);
- config.shell().status("Documenting", key.pkg)?;
- }
- } else {
- self.compiled.insert(key.pkg);
- config.shell().status("Compiling", key.pkg)?;
- }
- }
- Fresh if self.counts[key.pkg] == 0 => {
- self.compiled.insert(key.pkg);
- config.shell().verbose(|c| c.status("Fresh", key.pkg))?;
- }
- Fresh => {}
- }
- Ok(())
- }
-}
-
-impl<'a> Key<'a> {
- fn new(unit: &Unit<'a>) -> Key<'a> {
- Key {
- pkg: unit.pkg.package_id(),
- target: unit.target,
- profile: unit.profile,
- kind: unit.kind,
- }
- }
-
- fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) -> CargoResult<Vec<Key<'a>>> {
- let unit = Unit {
- pkg: cx.get_package(self.pkg)?,
- target: self.target,
- profile: self.profile,
- kind: self.kind,
- };
- let targets = cx.dep_targets(&unit);
- Ok(targets
- .iter()
- .filter_map(|unit| {
- // Binaries aren't actually needed to *compile* tests, just to run
- // them, so we don't include this dependency edge in the job graph.
- if self.target.is_test() && unit.target.is_bin() {
- None
- } else {
- Some(Key::new(unit))
- }
- })
- .collect())
- }
-}
-
-impl<'a> fmt::Debug for Key<'a> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(
- f,
- "{} => {}/{} => {:?}",
- self.pkg, self.target, self.profile, self.kind
- )
- }
-}
+++ /dev/null
-//! Management of the directory layout of a build
-//!
-//! The directory layout is a little tricky at times, hence a separate file to
-//! house this logic. The current layout looks like this:
-//!
-//! ```ignore
-//! # This is the root directory for all output, the top-level package
-//! # places all of its output here.
-//! target/
-//!
-//!     # This is the root directory for all output of *dependencies*
-//!     deps/
-//!
-//!     # Root directory for all compiled examples
-//!     examples/
-//!
-//!     # This is the location at which the output of all custom build
-//!     # commands are rooted
-//!     build/
-//!
-//!         # Each package gets its own directory where its build script and
-//!         # script output are placed
-//!         $pkg1/
-//!         $pkg2/
-//!         $pkg3/
-//!
-//!             # Each package directory has an `out` directory where output
-//!             # is placed.
-//!             out/
-//!
-//!     # This is the location at which the output of all old custom build
-//!     # commands are rooted
-//!     native/
-//!
-//!         # Each package gets its own directory for where its output is
-//!         # placed. We can't track exactly what's getting put in here, so
-//!         # we just assume that all relevant output is in these
-//!         # directories.
-//!         $pkg1/
-//!         $pkg2/
-//!         $pkg3/
-//!
-//!     # Directory used to store incremental data for the compiler (when
-//!     # incremental compilation is enabled).
-//!     incremental/
-//!
-//!     # Hidden directory that holds all of the fingerprint files for all
-//!     # packages
-//!     .fingerprint/
-//! ```
-
-use std::fs;
-use std::io;
-use std::path::{Path, PathBuf};
-
-use core::Workspace;
-use util::{CargoResult, Config, FileLock, Filesystem};
-
-/// Contains the paths of all target output locations.
-///
-/// See module docs for more information.
-pub struct Layout {
- root: PathBuf,
- deps: PathBuf,
- native: PathBuf,
- build: PathBuf,
- incremental: PathBuf,
- fingerprint: PathBuf,
- examples: PathBuf,
- /// The lockfile for a build; it will be unlocked when this struct is `drop`ped.
- _lock: FileLock,
-}
-
-pub fn is_bad_artifact_name(name: &str) -> bool {
- ["deps", "examples", "build", "native", "incremental"]
- .iter()
- .any(|&reserved| reserved == name)
-}
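-
-// e.g. `is_bad_artifact_name("deps")` is true while
-// `is_bad_artifact_name("my-bin")` is false; these names are reserved for
-// the directories listed in the module docs above.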
-
-impl Layout {
- /// Calculate the paths for build output, lock the build directory, and return as a Layout.
- ///
- /// This function will block if the directory is already locked.
- ///
- /// Differs from `at` in that this calculates the root path from the workspace target directory,
- /// adding the target triple and the profile (debug, release, ...).
- pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CargoResult<Layout> {
- let mut path = ws.target_dir();
- // Flexible target specifications often point at filenames, so interpret
- // the target triple as a Path and then just use the file stem as the
- // component for the directory name.
- if let Some(triple) = triple {
- path.push(Path::new(triple)
- .file_stem()
- .ok_or_else(|| format_err!("invalid target"))?);
- }
- path.push(dest);
- Layout::at(ws.config(), path)
- }
-
- /// Calculate the paths for build output, lock the build directory, and return as a Layout.
- ///
- /// This function will block if the directory is already locked.
- pub fn at(config: &Config, root: Filesystem) -> CargoResult<Layout> {
- // For now we don't do any finer-grained locking on the artifact
- // directory, so just lock the entire thing for the duration of this
- // compile.
- let lock = root.open_rw(".cargo-lock", config, "build directory")?;
- let root = root.into_path_unlocked();
-
- Ok(Layout {
- deps: root.join("deps"),
- native: root.join("native"),
- build: root.join("build"),
- incremental: root.join("incremental"),
- fingerprint: root.join(".fingerprint"),
- examples: root.join("examples"),
- root,
- _lock: lock,
- })
- }
-
- #[cfg(not(target_os = "macos"))]
- fn exclude_from_backups(&self, _: &Path) {}
-
- #[cfg(target_os = "macos")]
- /// Marks files or directories as excluded from Time Machine on macOS
- ///
- /// This is recommended to prevent derived/temporary files from bloating backups.
- fn exclude_from_backups(&self, path: &Path) {
- use std::ptr;
- use core_foundation::{number, string, url};
- use core_foundation::base::TCFType;
-
- // For compatibility with 10.7, a string is used instead of the global kCFURLIsExcludedFromBackupKey
- let is_excluded_key: Result<string::CFString, _> = "NSURLIsExcludedFromBackupKey".parse();
- match (url::CFURL::from_path(path, false), is_excluded_key) {
- (Some(path), Ok(is_excluded_key)) => unsafe {
- url::CFURLSetResourcePropertyForKey(
- path.as_concrete_TypeRef(),
- is_excluded_key.as_concrete_TypeRef(),
- number::kCFBooleanTrue as *const _,
- ptr::null_mut(),
- );
- },
- // Errors are ignored, since it's an optional feature and failure
- // doesn't prevent Cargo from working
- _ => {}
- }
- }
-
- /// Make sure all directories stored in the Layout exist on the filesystem.
- pub fn prepare(&mut self) -> io::Result<()> {
- if fs::metadata(&self.root).is_err() {
- fs::create_dir_all(&self.root)?;
- }
-
- self.exclude_from_backups(&self.root);
-
- mkdir(&self.deps)?;
- mkdir(&self.native)?;
- mkdir(&self.incremental)?;
- mkdir(&self.fingerprint)?;
- mkdir(&self.examples)?;
- mkdir(&self.build)?;
-
- return Ok(());
-
- fn mkdir(dir: &Path) -> io::Result<()> {
- if fs::metadata(&dir).is_err() {
- fs::create_dir(dir)?;
- }
- Ok(())
- }
- }
-
- /// Fetch the root path.
- pub fn dest(&self) -> &Path {
- &self.root
- }
- /// Fetch the deps path.
- pub fn deps(&self) -> &Path {
- &self.deps
- }
- /// Fetch the examples path.
- pub fn examples(&self) -> &Path {
- &self.examples
- }
- /// Fetch the root path.
- pub fn root(&self) -> &Path {
- &self.root
- }
- /// Fetch the incremental path.
- pub fn incremental(&self) -> &Path {
- &self.incremental
- }
- /// Fetch the fingerprint path.
- pub fn fingerprint(&self) -> &Path {
- &self.fingerprint
- }
- /// Fetch the build path.
- pub fn build(&self) -> &Path {
- &self.build
- }
-}
+++ /dev/null
-use std::collections::{HashMap, HashSet};
-use std::fmt::Write;
-
-use core::{PackageId, Resolve};
-use util::CargoResult;
-use super::Unit;
-
-#[derive(Default)]
-pub struct Links<'a> {
- validated: HashSet<&'a PackageId>,
- links: HashMap<String, &'a PackageId>,
-}
-
-impl<'a> Links<'a> {
- pub fn new() -> Links<'a> {
- Links {
- validated: HashSet::new(),
- links: HashMap::new(),
- }
- }
-
- pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'a>) -> CargoResult<()> {
- if !self.validated.insert(unit.pkg.package_id()) {
- return Ok(());
- }
- let lib = match unit.pkg.manifest().links() {
- Some(lib) => lib,
- None => return Ok(()),
- };
- if let Some(prev) = self.links.get(lib) {
- let pkg = unit.pkg.package_id();
-
- let describe_path = |pkgid: &PackageId| -> String {
- let dep_path = resolve.path_to_top(pkgid);
- let mut dep_path_desc = format!("package `{}`", dep_path[0]);
- for dep in dep_path.iter().skip(1) {
- write!(dep_path_desc, "\n ... which is depended on by `{}`", dep).unwrap();
- }
- dep_path_desc
- };
-
- bail!(
- "multiple packages link to native library `{}`, \
- but a native library can be linked only once\n\
- \n\
- {}\nlinks to native library `{}`\n\
- \n\
- {}\nalso links to native library `{}`",
- lib,
- describe_path(prev),
- lib,
- describe_path(pkg),
- lib
- )
- }
- if !unit.pkg
- .manifest()
- .targets()
- .iter()
- .any(|t| t.is_custom_build())
- {
- bail!(
- "package `{}` specifies that it links to `{}` but does not \
- have a custom build script",
- unit.pkg.package_id(),
- lib
- )
- }
- self.links.insert(lib.to_string(), unit.pkg.package_id());
- Ok(())
- }
-}
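The heart of `validate` is a first-claimant-wins map from `links` values to package ids. A self-contained sketch of that bookkeeping, with hypothetical package names:

```rust
use std::collections::HashMap;

fn main() {
    let mut links: HashMap<String, &str> = HashMap::new();
    // Two hypothetical packages both claiming the native library `git2`.
    for (pkg, lib) in [("libgit2-sys v0.6.0", "git2"), ("git2-vendored v0.1.0", "git2")] {
        if let Some(prev) = links.insert(lib.to_string(), pkg) {
            // The real code bails with a rich error that walks the dependency
            // path back to the top; this just reports the clash.
            eprintln!(
                "multiple packages link to native library `{}`: `{}` and `{}`",
                lib, prev, pkg
            );
        }
    }
}
```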
+++ /dev/null
-use std::collections::HashMap;
-use std::env;
-use std::ffi::{OsStr, OsString};
-use std::fs;
-use std::io::{self, Write};
-use std::path::{self, Path, PathBuf};
-use std::sync::Arc;
-
-use same_file::is_same_file;
-use serde_json;
-
-use core::{Feature, PackageId, Profile, Target};
-use core::manifest::Lto;
-use core::shell::ColorChoice;
-use util::{self, machine_message, ProcessBuilder};
-use util::{internal, join_paths, profile};
-use util::paths;
-use util::errors::{CargoResult, CargoResultExt, Internal};
-use util::Freshness;
-
-use self::job::{Job, Work};
-use self::job_queue::JobQueue;
-
-use self::output_depinfo::output_depinfo;
-
-pub use self::compilation::Compilation;
-pub use self::context::{Context, FileFlavor, Unit};
-pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts};
-pub use self::layout::is_bad_artifact_name;
-
-mod compilation;
-mod context;
-mod custom_build;
-mod fingerprint;
-mod job;
-mod job_queue;
-mod layout;
-mod links;
-mod output_depinfo;
-
-/// Whether an object is for the host arch, or the target arch.
-///
-/// These will be the same unless cross-compiling.
-#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord)]
-pub enum Kind {
- Host,
- Target,
-}
-
-/// Configuration information for a rustc build.
-#[derive(Default, Clone)]
-pub struct BuildConfig {
- /// The host arch triple
- ///
- /// e.g. x86_64-unknown-linux-gnu would be
- /// - machine: x86_64
- /// - hardware-platform: unknown
- /// - operating system: linux-gnu
- pub host_triple: String,
- /// Build information for the host arch
- pub host: TargetConfig,
- /// The target arch triple, defaults to host arch
- pub requested_target: Option<String>,
- /// Build information for the target
- pub target: TargetConfig,
- /// How many rustc jobs to run in parallel
- pub jobs: u32,
- /// Whether we are building for release
- pub release: bool,
- /// Whether we are running tests
- pub test: bool,
- /// Whether we are building documentation
- pub doc_all: bool,
- /// Whether to print standard output in JSON format (for machine reading)
- pub json_messages: bool,
-}
-
-impl BuildConfig {
- pub fn new(host_triple: &str, requested_target: &Option<String>) -> CargoResult<BuildConfig> {
- if let Some(ref s) = *requested_target {
- if s.trim().is_empty() {
- bail!("target was empty")
- }
- }
- Ok(BuildConfig {
- host_triple: host_triple.to_string(),
- requested_target: (*requested_target).clone(),
- jobs: 1,
- ..Default::default()
- })
- }
-}
-
-/// Information required to build for a target
-#[derive(Clone, Default)]
-pub struct TargetConfig {
- /// The path of archiver (lib builder) for this target.
- pub ar: Option<PathBuf>,
- /// The path of the linker for this target.
- pub linker: Option<PathBuf>,
- /// Special build options for any necessary input files (filename -> options)
- pub overrides: HashMap<String, BuildOutput>,
-}
-
-/// A glorified callback for executing calls to rustc. Rather than calling rustc
-/// directly, we'll use an Executor, giving clients an opportunity to intercept
-/// the build calls.
-pub trait Executor: Send + Sync + 'static {
- /// Called after a rustc process invocation is prepared up-front for a given
- /// unit of work (may still be modified for runtime-known dependencies, when
- /// the work is actually executed).
- fn init(&self, _cx: &Context, _unit: &Unit) {}
-
- /// In case of an `Err`, Cargo will not continue with the build process for
- /// this package.
- fn exec(&self, cmd: ProcessBuilder, _id: &PackageId, _target: &Target) -> CargoResult<()> {
- cmd.exec()?;
- Ok(())
- }
-
- fn exec_json(
- &self,
- cmd: ProcessBuilder,
- _id: &PackageId,
- _target: &Target,
- handle_stdout: &mut FnMut(&str) -> CargoResult<()>,
- handle_stderr: &mut FnMut(&str) -> CargoResult<()>,
- ) -> CargoResult<()> {
- cmd.exec_with_streaming(handle_stdout, handle_stderr, false)?;
- Ok(())
- }
-
- /// Queried when queuing each unit of work. If it returns true, then the
- /// unit will always be rebuilt, independent of whether it needs to be.
- fn force_rebuild(&self, _unit: &Unit) -> bool {
- false
- }
-}
-
-/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's
-/// default behaviour.
-#[derive(Copy, Clone)]
-pub struct DefaultExecutor;
-
-impl Executor for DefaultExecutor {}
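`DefaultExecutor` works because every trait method has a default body, so clients override only the hooks they need. A self-contained sketch of that pattern, with a toy `Cmd` standing in for `ProcessBuilder`:

```rust
struct Cmd(String);

trait Executor {
    // Default behaviour: just "run" the command.
    fn exec(&self, cmd: Cmd) -> Result<(), String> {
        println!("running: {}", cmd.0);
        Ok(())
    }
    // Default behaviour: never force a rebuild.
    fn force_rebuild(&self) -> bool {
        false
    }
}

// Mirrors DefaultExecutor: all hooks keep their defaults.
struct DefaultExecutor;
impl Executor for DefaultExecutor {}

// A client overriding a single hook, e.g. to force recompilation.
struct AlwaysRebuild;
impl Executor for AlwaysRebuild {
    fn force_rebuild(&self) -> bool {
        true
    }
}

fn main() {
    let execs: Vec<Box<dyn Executor>> = vec![Box::new(DefaultExecutor), Box::new(AlwaysRebuild)];
    for e in &execs {
        e.exec(Cmd("rustc --crate-name demo".to_string())).unwrap();
        println!("force_rebuild = {}", e.force_rebuild());
    }
}
```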
-
-fn compile<'a, 'cfg: 'a>(
- cx: &mut Context<'a, 'cfg>,
- jobs: &mut JobQueue<'a>,
- unit: &Unit<'a>,
- exec: &Arc<Executor>,
-) -> CargoResult<()> {
- if !cx.compiled.insert(*unit) {
- return Ok(());
- }
-
- // Build up the work to be done to compile this unit, enqueuing it once
- // we've got everything constructed.
- let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name()));
- fingerprint::prepare_init(cx, unit)?;
- cx.links.validate(cx.resolve, unit)?;
-
- let (dirty, fresh, freshness) = if unit.profile.run_custom_build {
- custom_build::prepare(cx, unit)?
- } else if unit.profile.doc && unit.profile.test {
- // we run these targets later, so this is just a noop for now
- (Work::noop(), Work::noop(), Freshness::Fresh)
- } else {
- let (mut freshness, dirty, fresh) = fingerprint::prepare_target(cx, unit)?;
- let work = if unit.profile.doc {
- rustdoc(cx, unit)?
- } else {
- rustc(cx, unit, exec)?
- };
- // Need to link targets on both the dirty and fresh paths.
- let dirty = work.then(link_targets(cx, unit, false)?).then(dirty);
- let fresh = link_targets(cx, unit, true)?.then(fresh);
-
- if exec.force_rebuild(unit) {
- freshness = Freshness::Dirty;
- }
-
- (dirty, fresh, freshness)
- };
- jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?;
- drop(p);
-
- // Be sure to compile all dependencies of this target as well.
- for unit in cx.dep_targets(unit).iter() {
- compile(cx, jobs, unit, exec)?;
- }
-
- Ok(())
-}
-
-fn rustc<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
- exec: &Arc<Executor>,
-) -> CargoResult<Work> {
- let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?;
-
- let name = unit.pkg.name().to_string();
-
- // If this is an upstream dep we don't want warnings from, turn off all
- // lints.
- if !cx.show_warnings(unit.pkg.package_id()) {
- rustc.arg("--cap-lints").arg("allow");
-
- // If this is an upstream dep but we *do* want warnings, make sure that they
- // don't fail compilation.
- } else if !unit.pkg.package_id().source_id().is_path() {
- rustc.arg("--cap-lints").arg("warn");
- }
-
- let outputs = cx.outputs(unit)?;
- let root = cx.files().out_dir(unit);
- let kind = unit.kind;
-
- // Prepare the native lib state (extra -L and -l flags)
- let build_state = cx.build_state.clone();
- let current_id = unit.pkg.package_id().clone();
- let build_deps = load_build_deps(cx, unit);
-
- // If we are a binary and the package also contains a library, then we
- // don't pass the `-l` flags.
- let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib());
- let do_rename = unit.target.allows_underscores() && !unit.profile.test;
- let real_name = unit.target.name().to_string();
- let crate_name = unit.target.crate_name();
-
- // XXX: rely on the target_filenames iterator as the source of truth rather than re-deriving the file stem.
- let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() {
- root.join(&crate_name)
- } else {
- root.join(&cx.files().file_stem(unit))
- }.with_extension("d");
- let dep_info_loc = fingerprint::dep_info_loc(cx, unit);
-
- rustc.args(&cx.rustflags_args(unit)?);
- let json_messages = cx.build_config.json_messages;
- let package_id = unit.pkg.package_id().clone();
- let target = unit.target.clone();
-
- exec.init(cx, unit);
- let exec = exec.clone();
-
- let root_output = cx.files().target_root().to_path_buf();
- let pkg_root = unit.pkg.root().to_path_buf();
- let cwd = rustc
- .get_cwd()
- .unwrap_or_else(|| cx.config.cwd())
- .to_path_buf();
-
- return Ok(Work::new(move |state| {
- // Only at runtime have we discovered what the extra -L and -l
- // arguments are for native libraries, so we process those here. We
- // also need to be sure to add any -L paths for our plugins to the
- // dynamic library load path as a plugin's dynamic library may be
- // located somewhere in there.
- // Finally, if custom environment variables have been produced by
- // previous build scripts, we include them in the rustc invocation.
- if let Some(build_deps) = build_deps {
- let build_state = build_state.outputs.lock().unwrap();
- add_native_deps(
- &mut rustc,
- &build_state,
- &build_deps,
- pass_l_flag,
- ¤t_id,
- )?;
- add_plugin_deps(&mut rustc, &build_state, &build_deps, &root_output)?;
- add_custom_env(&mut rustc, &build_state, ¤t_id, kind)?;
- }
-
- for output in outputs.iter() {
- // If there is both an rmeta and rlib, rustc will prefer to use the
- // rlib, even if it is older. Therefore, we must delete the rlib to
- // force using the new rmeta.
- if output.path.extension() == Some(OsStr::new("rmeta")) {
- let dst = root.join(&output.path).with_extension("rlib");
- if dst.exists() {
- paths::remove_file(&dst)?;
- }
- }
- }
-
- state.running(&rustc);
- if json_messages {
- exec.exec_json(
- rustc,
- &package_id,
- &target,
- &mut |line| {
- if !line.is_empty() {
- Err(internal(&format!(
- "compiler stdout is not empty: `{}`",
- line
- )))
- } else {
- Ok(())
- }
- },
- &mut |line| {
- // stderr from rustc can have a mix of JSON and non-JSON output
- if line.starts_with('{') {
- // Handle JSON lines
- let compiler_message = serde_json::from_str(line).map_err(|_| {
- internal(&format!("compiler produced invalid json: `{}`", line))
- })?;
-
- machine_message::emit(&machine_message::FromCompiler {
- package_id: &package_id,
- target: &target,
- message: compiler_message,
- });
- } else {
- // Forward non-JSON to stderr
- writeln!(io::stderr(), "{}", line)?;
- }
- Ok(())
- },
- ).chain_err(|| format!("Could not compile `{}`.", name))?;
- } else {
- exec.exec(rustc, &package_id, &target)
- .map_err(Internal::new)
- .chain_err(|| format!("Could not compile `{}`.", name))?;
- }
-
- if do_rename && real_name != crate_name {
- let dst = &outputs[0].path;
- let src = dst.with_file_name(
- dst.file_name()
- .unwrap()
- .to_str()
- .unwrap()
- .replace(&real_name, &crate_name),
- );
- if src.exists() && src.file_name() != dst.file_name() {
- fs::rename(&src, &dst)
- .chain_err(|| internal(format!("could not rename crate {:?}", src)))?;
- }
- }
-
- if rustc_dep_info_loc.exists() {
- fingerprint::translate_dep_info(&rustc_dep_info_loc, &dep_info_loc, &pkg_root, &cwd)
- .chain_err(|| {
- internal(format!(
- "could not parse/generate dep info at: {}",
- rustc_dep_info_loc.display()
- ))
- })?;
- }
-
- Ok(())
- }));
-
- // Add all relevant -L and -l flags from dependencies (now calculated and
- // present in `state`) to the command provided
- fn add_native_deps(
- rustc: &mut ProcessBuilder,
- build_state: &BuildMap,
- build_scripts: &BuildScripts,
- pass_l_flag: bool,
- current_id: &PackageId,
- ) -> CargoResult<()> {
- for key in build_scripts.to_link.iter() {
- let output = build_state.get(key).ok_or_else(|| {
- internal(format!(
- "couldn't find build state for {}/{:?}",
- key.0, key.1
- ))
- })?;
- for path in output.library_paths.iter() {
- rustc.arg("-L").arg(path);
- }
- if key.0 == *current_id {
- for cfg in &output.cfgs {
- rustc.arg("--cfg").arg(cfg);
- }
- if pass_l_flag {
- for name in output.library_links.iter() {
- rustc.arg("-l").arg(name);
- }
- }
- }
- }
- Ok(())
- }
-
- // Add all custom environment variables present in `state` (after they've
- // been put there by one of the `build_scripts`) to the command provided.
- fn add_custom_env(
- rustc: &mut ProcessBuilder,
- build_state: &BuildMap,
- current_id: &PackageId,
- kind: Kind,
- ) -> CargoResult<()> {
- let key = (current_id.clone(), kind);
- if let Some(output) = build_state.get(&key) {
- for &(ref name, ref value) in output.env.iter() {
- rustc.env(name, value);
- }
- }
- Ok(())
- }
-}
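The stderr handler above has to demultiplex rustc's `--error-format=json` stream, which can still carry plain-text lines. A minimal sketch of that classification, assuming `serde_json` as a dependency:

```rust
extern crate serde_json;

fn handle_stderr_line(line: &str) -> Result<(), String> {
    if line.starts_with('{') {
        // JSON diagnostics are parsed and would be re-emitted as machine messages.
        let msg: serde_json::Value = serde_json::from_str(line)
            .map_err(|_| format!("compiler produced invalid json: `{}`", line))?;
        println!("diagnostic: {}", msg);
    } else {
        // Anything else is forwarded to stderr verbatim.
        eprintln!("{}", line);
    }
    Ok(())
}

fn main() {
    handle_stderr_line(r#"{"message":"unused variable: `x`","level":"warning"}"#).unwrap();
    handle_stderr_line("warning: 1 warning emitted").unwrap();
}
```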
-
-/// Link the compiled target (often of the form `foo-{metadata_hash}`) to the
-/// final target. This must happen during both "Fresh" and "Compile".
-fn link_targets<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
- fresh: bool,
-) -> CargoResult<Work> {
- let outputs = cx.outputs(unit)?;
- let export_dir = cx.files().export_dir(unit);
- let package_id = unit.pkg.package_id().clone();
- let target = unit.target.clone();
- let profile = unit.profile.clone();
- let features = cx.resolve
- .features_sorted(&package_id)
- .into_iter()
- .map(|s| s.to_owned())
- .collect();
- let json_messages = cx.build_config.json_messages;
-
- Ok(Work::new(move |_| {
- // If we're a "root crate", i.e. the target of this compilation, then we
- // hard link our outputs out of the `deps` directory into the directory
- // above. This means that `cargo build` will produce binaries in
- // `target/debug` which one probably expects.
- let mut destinations = vec![];
- for output in outputs.iter() {
- let src = &output.path;
- // This may have been a `cargo rustc` command which changes the
- // output, so the source may not actually exist.
- if !src.exists() {
- continue;
- }
- let dst = match output.hardlink.as_ref() {
- Some(dst) => dst,
- None => {
- destinations.push(src.display().to_string());
- continue;
- }
- };
- destinations.push(dst.display().to_string());
- hardlink_or_copy(src, dst)?;
- if let Some(ref path) = export_dir {
- if !path.exists() {
- fs::create_dir_all(path)?;
- }
-
- hardlink_or_copy(src, &path.join(dst.file_name().unwrap()))?;
- }
- }
-
- if json_messages {
- machine_message::emit(&machine_message::Artifact {
- package_id: &package_id,
- target: &target,
- profile: &profile,
- features,
- filenames: destinations,
- fresh,
- });
- }
- Ok(())
- }))
-}
-
-fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> {
- debug!("linking {} to {}", src.display(), dst.display());
- if is_same_file(src, dst).unwrap_or(false) {
- return Ok(());
- }
- if dst.exists() {
- paths::remove_file(&dst)?;
- }
-
- let link_result = if src.is_dir() {
- #[cfg(unix)]
- use std::os::unix::fs::symlink;
- #[cfg(target_os = "redox")]
- use std::os::redox::fs::symlink;
- #[cfg(windows)]
- use std::os::windows::fs::symlink_dir as symlink;
-
- let dst_dir = dst.parent().unwrap();
- let src = if src.starts_with(dst_dir) {
- src.strip_prefix(dst_dir).unwrap()
- } else {
- src
- };
- symlink(src, dst)
- } else {
- fs::hard_link(src, dst)
- };
- link_result
- .or_else(|err| {
- debug!("link failed {}. falling back to fs::copy", err);
- fs::copy(src, dst).map(|_| ())
- })
- .chain_err(|| {
- format!(
- "failed to link or copy `{}` to `{}`",
- src.display(),
- dst.display()
- )
- })?;
- Ok(())
-}
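The essence of the fallback above: hard links are cheap and keep the top-level artifact in sync with the hashed file in `deps/`, but they can fail (e.g. across filesystems), so a copy is the backstop. A reduced sketch using only the standard library and temp-file paths:

```rust
use std::fs;
use std::io;
use std::path::Path;

// Prefer a hard link; fall back to a full copy if linking fails.
fn hardlink_or_copy(src: &Path, dst: &Path) -> io::Result<()> {
    fs::hard_link(src, dst).or_else(|_| fs::copy(src, dst).map(|_| ()))
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir();
    let (src, dst) = (dir.join("hl_demo_src.txt"), dir.join("hl_demo_dst.txt"));
    fs::write(&src, "hello")?;
    let _ = fs::remove_file(&dst); // the real code removes a stale dst first
    hardlink_or_copy(&src, &dst)?;
    assert_eq!(fs::read_to_string(&dst)?, "hello");
    fs::remove_file(&src)?;
    fs::remove_file(&dst)
}
```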
-
-fn load_build_deps(cx: &Context, unit: &Unit) -> Option<Arc<BuildScripts>> {
- cx.build_scripts.get(unit).cloned()
-}
-
-// For all plugin dependencies, add their -L paths (now calculated and
-// present in `state`) to the dynamic library load path for the command to
-// execute.
-fn add_plugin_deps(
- rustc: &mut ProcessBuilder,
- build_state: &BuildMap,
- build_scripts: &BuildScripts,
- root_output: &PathBuf,
-) -> CargoResult<()> {
- let var = util::dylib_path_envvar();
- let search_path = rustc.get_env(var).unwrap_or_default();
- let mut search_path = env::split_paths(&search_path).collect::<Vec<_>>();
- for id in build_scripts.plugins.iter() {
- let key = (id.clone(), Kind::Host);
- let output = build_state
- .get(&key)
- .ok_or_else(|| internal(format!("couldn't find libs for plugin dep {}", id)))?;
- search_path.append(&mut filter_dynamic_search_path(
- output.library_paths.iter(),
- root_output,
- ));
- }
- let search_path = join_paths(&search_path, var)?;
- rustc.env(var, &search_path);
- Ok(())
-}
-
-// Determine paths to add to the dynamic search path from -L entries
-//
-// Strip off prefixes like "native=" or "framework=" and filter out directories
-// *not* inside our output directory since they are likely spurious and can cause
-// clashes with system shared libraries (issue #3366).
-fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec<PathBuf>
-where
- I: Iterator<Item = &'a PathBuf>,
-{
- let mut search_path = vec![];
- for dir in paths {
- let dir = match dir.to_str() {
- Some(s) => {
- let mut parts = s.splitn(2, '=');
- match (parts.next(), parts.next()) {
- (Some("native"), Some(path))
- | (Some("crate"), Some(path))
- | (Some("dependency"), Some(path))
- | (Some("framework"), Some(path))
- | (Some("all"), Some(path)) => path.into(),
- _ => dir.clone(),
- }
- }
- None => dir.clone(),
- };
- if dir.starts_with(&root_output) {
- search_path.push(dir);
- } else {
- debug!(
- "Not including path {} in runtime library search path because it is \
- outside target root {}",
- dir.display(),
- root_output.display()
- );
- }
- }
- search_path
-}
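The prefix handling above can be shown in isolation: `-L` entries may carry a kind prefix such as `native=` that has to be stripped before the directory is considered for the dynamic-library search path. A standalone sketch:

```rust
// Strip a rustc -L kind prefix, if present; otherwise use the entry as-is.
fn strip_link_kind(entry: &str) -> &str {
    let mut parts = entry.splitn(2, '=');
    match (parts.next(), parts.next()) {
        (Some("native"), Some(path))
        | (Some("crate"), Some(path))
        | (Some("dependency"), Some(path))
        | (Some("framework"), Some(path))
        | (Some("all"), Some(path)) => path,
        _ => entry,
    }
}

fn main() {
    assert_eq!(strip_link_kind("native=/opt/libfoo"), "/opt/libfoo");
    assert_eq!(strip_link_kind("framework=/Library/Frameworks"), "/Library/Frameworks");
    assert_eq!(strip_link_kind("/plain/dir"), "/plain/dir"); // no prefix
}
```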
-
-fn prepare_rustc<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- crate_types: &[&str],
- unit: &Unit<'a>,
-) -> CargoResult<ProcessBuilder> {
- let mut base = cx.compilation.rustc_process(unit.pkg)?;
- base.inherit_jobserver(&cx.jobserver);
- build_base_args(cx, &mut base, unit, crate_types)?;
- build_deps_args(&mut base, cx, unit)?;
- Ok(base)
-}
-
-fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Work> {
- let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg)?;
- rustdoc.inherit_jobserver(&cx.jobserver);
- rustdoc.arg("--crate-name").arg(&unit.target.crate_name());
- add_path_args(cx, unit, &mut rustdoc);
-
- if unit.kind != Kind::Host {
- if let Some(target) = cx.requested_target() {
- rustdoc.arg("--target").arg(target);
- }
- }
-
- let doc_dir = cx.files().out_dir(unit);
-
- // Create the documentation directory ahead of time as rustdoc currently has
- // a bug where concurrent invocations will race to create this directory if
- // it doesn't already exist.
- fs::create_dir_all(&doc_dir)?;
-
- rustdoc.arg("-o").arg(doc_dir);
-
- for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
- rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
- }
-
- let manifest = unit.pkg.manifest();
-
- if manifest.features().is_enabled(Feature::edition()) {
- rustdoc.arg("-Zunstable-options");
- rustdoc.arg(format!("--edition={}", &manifest.edition()));
- }
-
- if let Some(ref args) = unit.profile.rustdoc_args {
- rustdoc.args(args);
- }
-
- build_deps_args(&mut rustdoc, cx, unit)?;
-
- rustdoc.args(&cx.rustdocflags_args(unit)?);
-
- let name = unit.pkg.name().to_string();
- let build_state = cx.build_state.clone();
- let key = (unit.pkg.package_id().clone(), unit.kind);
-
- Ok(Work::new(move |state| {
- if let Some(output) = build_state.outputs.lock().unwrap().get(&key) {
- for cfg in output.cfgs.iter() {
- rustdoc.arg("--cfg").arg(cfg);
- }
- for &(ref name, ref value) in output.env.iter() {
- rustdoc.env(name, value);
- }
- }
- state.running(&rustdoc);
- rustdoc
- .exec()
- .chain_err(|| format!("Could not document `{}`.", name))?;
- Ok(())
- }))
-}
-
-// The path that we pass to rustc is actually fairly important because it will
-// show up in error messages (important for readability), debug information
-// (important for caching), etc. As a result we need to be pretty careful how we
-// actually invoke rustc.
-//
-// In general users don't expect `cargo build` to cause rebuilds if they change
-// directories, whether that means changing directories within the project or
-// moving the whole project wholesale to a new location. As a result we mostly
-// don't factor `cwd` into this calculation. Instead we try to track the
-// workspace as much as possible and we update the current directory of
-// rustc/rustdoc where appropriate.
-//
-// The first returned value here is the argument to pass to rustc, and the
-// second is the cwd that rustc should operate in.
-fn path_args(cx: &Context, unit: &Unit) -> (PathBuf, PathBuf) {
- let ws_root = cx.ws.root();
- let src = unit.target.src_path();
- assert!(src.is_absolute());
- match src.strip_prefix(ws_root) {
- Ok(path) => (path.to_path_buf(), ws_root.to_path_buf()),
- Err(_) => (src.to_path_buf(), unit.pkg.root().to_path_buf()),
- }
-}
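Concretely, the two cases above look like this (hypothetical paths; the real code also asserts the source path is absolute):

```rust
use std::path::Path;

fn main() {
    let ws_root = Path::new("/home/alice/ws");

    // Inside the workspace: rustc gets a workspace-relative path and runs
    // from the workspace root, so moving the workspace doesn't cause rebuilds.
    let src = Path::new("/home/alice/ws/member/src/lib.rs");
    assert_eq!(
        src.strip_prefix(ws_root).unwrap(),
        Path::new("member/src/lib.rs")
    );

    // Outside the workspace (e.g. a registry dependency): strip_prefix fails
    // and the absolute path is used, with the package root as cwd.
    let dep = Path::new("/home/alice/.cargo/registry/src/foo-1.0.0/src/lib.rs");
    assert!(dep.strip_prefix(ws_root).is_err());
}
```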
-
-fn add_path_args(cx: &Context, unit: &Unit, cmd: &mut ProcessBuilder) {
- let (arg, cwd) = path_args(cx, unit);
- cmd.arg(arg);
- cmd.cwd(cwd);
-}
-
-fn build_base_args<'a, 'cfg>(
- cx: &mut Context<'a, 'cfg>,
- cmd: &mut ProcessBuilder,
- unit: &Unit<'a>,
- crate_types: &[&str],
-) -> CargoResult<()> {
- let Profile {
- ref opt_level,
- ref lto,
- codegen_units,
- ref rustc_args,
- debuginfo,
- debug_assertions,
- overflow_checks,
- rpath,
- test,
- doc: _doc,
- run_custom_build,
- ref panic,
- check,
- ..
- } = *unit.profile;
- assert!(!run_custom_build);
-
- cmd.arg("--crate-name").arg(&unit.target.crate_name());
-
- add_path_args(cx, unit, cmd);
-
- match cx.config.shell().color_choice() {
- ColorChoice::Always => {
- cmd.arg("--color").arg("always");
- }
- ColorChoice::Never => {
- cmd.arg("--color").arg("never");
- }
- ColorChoice::CargoAuto => {}
- }
-
- if cx.build_config.json_messages {
- cmd.arg("--error-format").arg("json");
- }
-
- if !test {
- for crate_type in crate_types.iter() {
- cmd.arg("--crate-type").arg(crate_type);
- }
- }
-
- if check {
- cmd.arg("--emit=dep-info,metadata");
- } else {
- cmd.arg("--emit=dep-info,link");
- }
-
- let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build())
- || (crate_types.contains(&"dylib") && cx.ws.members().any(|p| p != unit.pkg));
- if prefer_dynamic {
- cmd.arg("-C").arg("prefer-dynamic");
- }
-
- if opt_level != "0" {
- cmd.arg("-C").arg(&format!("opt-level={}", opt_level));
- }
-
- // If a panic mode was configured *and* we're not ever going to be used in a
- // plugin, then we can compile with that panic mode.
- //
- // If we're used in a plugin then we'll eventually be linked to libsyntax
- // most likely which isn't compiled with a custom panic mode, so we'll just
- // get an error if we actually compile with that. This fixes `panic=abort`
- // crates which have plugin dependencies, but unfortunately means that
- // dependencies shared between the main application and plugins must be
- // compiled without `panic=abort`. This isn't so bad, though, as the main
- // application will still be compiled with `panic=abort`.
- if let Some(panic) = panic.as_ref() {
- if !cx.used_in_plugin.contains(unit) {
- cmd.arg("-C").arg(format!("panic={}", panic));
- }
- }
- let manifest = unit.pkg.manifest();
-
- if manifest.features().is_enabled(Feature::edition()) {
- cmd.arg(format!("-Zedition={}", manifest.edition()));
- }
-
- // Disable LTO for host builds, as LTO and `prefer-dynamic` are mutually
- // exclusive.
- if unit.target.can_lto() && !unit.target.for_host() {
- match *lto {
- Lto::Bool(false) => {}
- Lto::Bool(true) => {
- cmd.args(&["-C", "lto"]);
- }
- Lto::Named(ref s) => {
- cmd.arg("-C").arg(format!("lto={}", s));
- }
- }
- }
-
- if let Some(n) = codegen_units {
- // There are some restrictions with LTO and codegen-units, so we
- // only add codegen units when LTO is not used.
- cmd.arg("-C").arg(&format!("codegen-units={}", n));
- }
-
- if let Some(debuginfo) = debuginfo {
- cmd.arg("-C").arg(format!("debuginfo={}", debuginfo));
- }
-
- if let Some(ref args) = *rustc_args {
- cmd.args(args);
- }
-
- // -C overflow-checks is implied by the setting of -C debug-assertions,
- // so we only need to provide -C overflow-checks if it differs from
- // the value of -C debug-assertions we would provide.
- if opt_level != "0" {
- if debug_assertions {
- cmd.args(&["-C", "debug-assertions=on"]);
- if !overflow_checks {
- cmd.args(&["-C", "overflow-checks=off"]);
- }
- } else if overflow_checks {
- cmd.args(&["-C", "overflow-checks=on"]);
- }
- } else if !debug_assertions {
- cmd.args(&["-C", "debug-assertions=off"]);
- if overflow_checks {
- cmd.args(&["-C", "overflow-checks=on"]);
- }
- } else if !overflow_checks {
- cmd.args(&["-C", "overflow-checks=off"]);
- }
-
- if test && unit.target.harness() {
- cmd.arg("--test");
- } else if test {
- cmd.arg("--cfg").arg("test");
- }
-
- // We ideally want deterministic invocations of rustc to ensure that
- // rustc-caching strategies like sccache are able to cache more, so sort the
- // feature list here.
- for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
- cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
- }
-
- match cx.files().metadata(unit) {
- Some(m) => {
- cmd.arg("-C").arg(&format!("metadata={}", m));
- cmd.arg("-C").arg(&format!("extra-filename=-{}", m));
- }
- None => {
- cmd.arg("-C")
- .arg(&format!("metadata={}", cx.files().target_short_hash(unit)));
- }
- }
-
- if rpath {
- cmd.arg("-C").arg("rpath");
- }
-
- cmd.arg("--out-dir").arg(&cx.files().out_dir(unit));
-
- fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str, val: Option<&OsStr>) {
- if let Some(val) = val {
- let mut joined = OsString::from(prefix);
- joined.push(val);
- cmd.arg(key).arg(joined);
- }
- }
-
- if unit.kind == Kind::Target {
- opt(
- cmd,
- "--target",
- "",
- cx.requested_target().map(|s| s.as_ref()),
- );
- }
-
- opt(cmd, "-C", "ar=", cx.ar(unit.kind).map(|s| s.as_ref()));
- opt(
- cmd,
- "-C",
- "linker=",
- cx.linker(unit.kind).map(|s| s.as_ref()),
- );
- cmd.args(&cx.incremental_args(unit)?);
-
- Ok(())
-}
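The `-C debug-assertions`/`-C overflow-checks` branching above only emits a flag when it differs from what rustc would infer on its own. A self-contained sketch of that derivation, mirroring the branches:

```rust
fn checks_flags(opt_level: &str, debug_assertions: bool, overflow_checks: bool) -> Vec<&'static str> {
    let mut flags = vec![];
    if opt_level != "0" {
        if debug_assertions {
            flags.extend(["-C", "debug-assertions=on"]);
            if !overflow_checks {
                flags.extend(["-C", "overflow-checks=off"]);
            }
        } else if overflow_checks {
            flags.extend(["-C", "overflow-checks=on"]);
        }
    } else if !debug_assertions {
        flags.extend(["-C", "debug-assertions=off"]);
        if overflow_checks {
            flags.extend(["-C", "overflow-checks=on"]);
        }
    } else if !overflow_checks {
        flags.extend(["-C", "overflow-checks=off"]);
    }
    flags
}

fn main() {
    // Defaults need no flags: release implies both off, debug implies both on.
    assert!(checks_flags("3", false, false).is_empty());
    assert!(checks_flags("0", true, true).is_empty());
    // A debug build that disables only overflow checks must say so explicitly.
    assert_eq!(checks_flags("0", true, false), ["-C", "overflow-checks=off"]);
}
```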
-
-fn build_deps_args<'a, 'cfg>(
- cmd: &mut ProcessBuilder,
- cx: &mut Context<'a, 'cfg>,
- unit: &Unit<'a>,
-) -> CargoResult<()> {
- cmd.arg("-L").arg(&{
- let mut deps = OsString::from("dependency=");
- deps.push(cx.files().deps_dir(unit));
- deps
- });
-
- // Be sure that the host path is also listed. This'll ensure that proc-macro
- // dependencies are correctly found (for reexported macros).
- if let Kind::Target = unit.kind {
- cmd.arg("-L").arg(&{
- let mut deps = OsString::from("dependency=");
- deps.push(cx.files().host_deps());
- deps
- });
- }
-
- let dep_targets = cx.dep_targets(unit);
-
- // If there is no linkable target but there should be one, rustc fails
- // later on if there is an `extern crate` for it. This may turn into a
- // hard error in the future; see PR #4797.
- if !dep_targets
- .iter()
- .any(|u| !u.profile.doc && u.target.linkable())
- {
- if let Some(u) = dep_targets
- .iter()
- .find(|u| !u.profile.doc && u.target.is_lib())
- {
- cx.config.shell().warn(format!(
- "The package `{}` \
- provides no linkable target. The compiler might raise an error while compiling \
- `{}`. Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \
- Cargo.toml. This warning might turn into a hard error in the future.",
- u.target.crate_name(),
- unit.target.crate_name(),
- u.target.crate_name()
- ))?;
- }
- }
-
- for dep in dep_targets {
- if dep.profile.run_custom_build {
- cmd.env("OUT_DIR", &cx.files().build_script_out_dir(&dep));
- }
- if dep.target.linkable() && !dep.profile.doc {
- link_to(cmd, cx, unit, &dep)?;
- }
- }
-
- return Ok(());
-
- fn link_to<'a, 'cfg>(
- cmd: &mut ProcessBuilder,
- cx: &mut Context<'a, 'cfg>,
- current: &Unit<'a>,
- dep: &Unit<'a>,
- ) -> CargoResult<()> {
- for output in cx.outputs(dep)?.iter() {
- if output.flavor != FileFlavor::Linkable {
- continue;
- }
- let mut v = OsString::new();
-
- // Unfortunately right now Cargo doesn't have a great way to get a
- // 1:1 mapping of entries in `dependencies()` to the actual crate
- // we're depending on. Instead we're left to do some guesswork here
- // to figure out what `Dependency` the `dep` unit corresponds to in
- // `current` to see if we're renaming it.
- //
- // I believe this mostly works out for now, but we'll likely want
- // to tighten this up in the future.
- let name = current
- .pkg
- .dependencies()
- .iter()
- .filter(|d| d.matches_ignoring_source(dep.pkg.package_id()))
- .filter_map(|d| d.rename())
- .next();
-
- v.push(name.unwrap_or(&dep.target.crate_name()));
- v.push("=");
- v.push(cx.files().out_dir(dep));
- v.push(&path::MAIN_SEPARATOR.to_string());
- v.push(&output.path.file_name().unwrap());
- cmd.arg("--extern").arg(&v);
- }
- Ok(())
- }
-}
-
-fn envify(s: &str) -> String {
- s.chars()
- .flat_map(|c| c.to_uppercase())
- .map(|c| if c == '-' { '_' } else { c })
- .collect()
-}
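`envify` is small enough to demonstrate standalone; it derives environment-variable-style names (uppercase, hyphens to underscores) from package or feature names:

```rust
fn envify(s: &str) -> String {
    s.chars()
        .flat_map(|c| c.to_uppercase())
        .map(|c| if c == '-' { '_' } else { c })
        .collect()
}

fn main() {
    assert_eq!(envify("foo-bar"), "FOO_BAR");
    assert_eq!(envify("serde_json"), "SERDE_JSON");
}
```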
-
-impl Kind {
- fn for_target(&self, target: &Target) -> Kind {
- // Once we start compiling for the `Host` kind we continue doing so, but
- // if we are a `Target` kind and then we start compiling for a target
- // that needs to be on the host we lift ourselves up to `Host`
- match *self {
- Kind::Host => Kind::Host,
- Kind::Target if target.for_host() => Kind::Host,
- Kind::Target => Kind::Target,
- }
- }
-}
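A self-contained sketch of the lifting rule above, with a boolean standing in for `Target::for_host`: once a unit is being built for the host it stays there, and a target-kind unit is lifted to the host when the target itself must run on the host (build scripts, plugins):

```rust
#[derive(Debug, PartialEq, Clone, Copy)]
enum Kind {
    Host,
    Target,
}

impl Kind {
    fn for_target(self, target_for_host: bool) -> Kind {
        match self {
            Kind::Host => Kind::Host,
            Kind::Target if target_for_host => Kind::Host,
            Kind::Target => Kind::Target,
        }
    }
}

fn main() {
    // A build script of a cross-compiled crate must run on the host.
    assert_eq!(Kind::Target.for_target(true), Kind::Host);
    // Ordinary dependencies keep the requested kind.
    assert_eq!(Kind::Target.for_target(false), Kind::Target);
    // Once on the host, everything below stays on the host.
    assert_eq!(Kind::Host.for_target(false), Kind::Host);
}
```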
+++ /dev/null
-use std::collections::{BTreeSet, HashSet};
-use std::io::{BufWriter, Write};
-use std::fs::File;
-use std::path::{Path, PathBuf};
-
-use super::{fingerprint, Context, Unit};
-use util::{internal, CargoResult};
-use util::paths;
-
-fn render_filename<P: AsRef<Path>>(path: P, basedir: Option<&str>) -> CargoResult<String> {
- let path = path.as_ref();
- let relpath = match basedir {
- None => path,
- Some(base) => match path.strip_prefix(base) {
- Ok(relpath) => relpath,
- _ => path,
- },
- };
- relpath
- .to_str()
- .ok_or_else(|| internal("path not utf-8"))
- .map(|f| f.replace(" ", "\\ "))
-}
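A reduced version of `render_filename` shows the two transformations: re-rooting against an optional basedir, and escaping spaces for the Makefile-style dep-info format (Unix-style paths assumed):

```rust
use std::path::Path;

fn render_filename(path: &Path, basedir: Option<&str>) -> Option<String> {
    let relpath = match basedir {
        Some(base) => path.strip_prefix(base).unwrap_or(path),
        None => path,
    };
    // Escape spaces; a non-UTF-8 path yields None (the real code errors).
    relpath.to_str().map(|f| f.replace(" ", "\\ "))
}

fn main() {
    let p = Path::new("/build/out dir/libfoo.rlib");
    assert_eq!(render_filename(p, Some("/build")).unwrap(), "out\\ dir/libfoo.rlib");
    assert_eq!(render_filename(p, None).unwrap(), "/build/out\\ dir/libfoo.rlib");
}
```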
-
-fn add_deps_for_unit<'a, 'b>(
- deps: &mut BTreeSet<PathBuf>,
- context: &mut Context<'a, 'b>,
- unit: &Unit<'a>,
- visited: &mut HashSet<Unit<'a>>,
-) -> CargoResult<()> {
- if !visited.insert(*unit) {
- return Ok(());
- }
-
- // units representing the execution of a build script don't actually
- // generate a dep info file, so we just keep on going below
- if !unit.profile.run_custom_build {
- // Add dependencies from rustc dep-info output (stored in fingerprint directory)
- let dep_info_loc = fingerprint::dep_info_loc(context, unit);
- if let Some(paths) = fingerprint::parse_dep_info(unit.pkg, &dep_info_loc)? {
- for path in paths {
- deps.insert(path);
- }
- } else {
- debug!(
- "can't find dep_info for {:?} {:?}",
- unit.pkg.package_id(),
- unit.profile
- );
- return Err(internal("dep_info missing"));
- }
- }
-
- // Add rerun-if-changed dependencies
- let key = (unit.pkg.package_id().clone(), unit.kind);
- if let Some(output) = context.build_state.outputs.lock().unwrap().get(&key) {
- for path in &output.rerun_if_changed {
- deps.insert(path.into());
- }
- }
-
- // Recursively traverse all transitive dependencies
- for dep_unit in context.dep_targets(unit).iter() {
- let source_id = dep_unit.pkg.package_id().source_id();
- if source_id.is_path() {
- add_deps_for_unit(deps, context, dep_unit, visited)?;
- }
- }
- Ok(())
-}
-
-pub fn output_depinfo<'a, 'b>(context: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
- let mut deps = BTreeSet::new();
- let mut visited = HashSet::new();
- let success = add_deps_for_unit(&mut deps, context, unit, &mut visited).is_ok();
- let basedir_string;
- let basedir = match context.config.get_path("build.dep-info-basedir")? {
- Some(value) => {
- basedir_string = value
- .val
- .as_os_str()
- .to_str()
- .ok_or_else(|| internal("build.dep-info-basedir path not utf-8"))?
- .to_string();
- Some(basedir_string.as_str())
- }
- None => None,
- };
- let deps = deps.iter()
- .map(|f| render_filename(f, basedir))
- .collect::<CargoResult<Vec<_>>>()?;
-
- for output in context.outputs(unit)?.iter() {
- if let Some(ref link_dst) = output.hardlink {
- let output_path = link_dst.with_extension("d");
- if success {
- let target_fn = render_filename(link_dst, basedir)?;
-
- // If nothing changed, don't recreate the file, since that would
- // alter its mtime
- if let Ok(previous) = fingerprint::parse_rustc_dep_info(&output_path) {
- if previous.len() == 1 && previous[0].0 == target_fn && previous[0].1 == deps {
- continue;
- }
- }
-
- // Otherwise write it all out
- let mut outfile = BufWriter::new(File::create(output_path)?);
- write!(outfile, "{}:", target_fn)?;
- for dep in &deps {
- write!(outfile, " {}", dep)?;
- }
- writeln!(outfile)?;
-
- // dep-info generation failed, so delete output file. This will
- // usually cause the build system to always rerun the build
- // rule, which is correct if inefficient.
- } else if output_path.exists() {
- paths::remove_file(output_path)?;
- }
- }
- }
- Ok(())
-}
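The file that `output_depinfo` writes is a single Makefile-style rule per output: the hardlinked artifact as the target, followed by every rendered dependency. A sketch of producing one such line from already-rendered names:

```rust
fn dep_info_line(target: &str, deps: &[&str]) -> String {
    let mut line = format!("{}:", target);
    for dep in deps {
        line.push(' ');
        line.push_str(dep);
    }
    line.push('\n');
    line
}

fn main() {
    let line = dep_info_line("target/debug/libfoo.rlib", &["src/lib.rs", "src/util.rs"]);
    assert_eq!(line, "target/debug/libfoo.rlib: src/lib.rs src/util.rs\n");
}
```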
use std::ffi::{OsStr, OsString};
-use ops::{self, Compilation};
+use ops;
+use core::compiler::Compilation;
use util::{self, CargoTestError, ProcessError, Test};
use util::errors::CargoResult;
use core::Workspace;
pub use self::cargo_compile::{compile, compile_with_exec, compile_ws, CompileOptions};
pub use self::cargo_compile::{CompileFilter, CompileMode, FilterRule, MessageFormat, Packages};
pub use self::cargo_read_manifest::{read_package, read_packages};
-pub use self::cargo_rustc::{Compilation, Kind, Unit};
-pub use self::cargo_rustc::{is_bad_artifact_name, Context};
-pub use self::cargo_rustc::{BuildConfig, BuildOutput, TargetConfig};
-pub use self::cargo_rustc::{DefaultExecutor, Executor};
pub use self::cargo_run::run;
pub use self::cargo_install::{install, install_list, uninstall};
pub use self::cargo_new::{init, new, NewOptions, VersionControl};
mod cargo_pkgid;
mod cargo_read_manifest;
mod cargo_run;
-mod cargo_rustc;
mod cargo_test;
mod lockfile;
mod registry;
use std::fs::{self, DirEntry};
use std::collections::HashSet;
-use core::Target;
-use ops::is_bad_artifact_name;
+use core::{compiler, Target};
use util::errors::CargoResult;
use super::{LibKind, PathValue, StringOrBool, TomlBenchTarget, TomlBinTarget, TomlExampleTarget,
TomlLibTarget, TomlManifest, TomlTarget, TomlTestTarget};
));
}
- if is_bad_artifact_name(&name) {
+ if compiler::is_bad_artifact_name(&name) {
bail!("the binary target name `{}` is forbidden", name)
}
}