From 96b40ff0b7f5a065a6b57abc83e717bb3c85b3ec Mon Sep 17 00:00:00 2001 From: Vasudev Kamath Date: Sun, 29 Jul 2018 22:07:36 +0530 Subject: [PATCH] Import cargo_0.28.0.orig.tar.gz [dgit import orig cargo_0.28.0.orig.tar.gz] --- .gitignore | 14 + .travis.yml | 54 + ARCHITECTURE.md | 129 + CONTRIBUTING.md | 183 + Cargo.toml | 92 + LICENSE-APACHE | 201 + LICENSE-MIT | 23 + LICENSE-THIRD-PARTY | 1272 ++++ README.md | 90 + appveyor.yml | 21 + src/bin/cargo/cli.rs | 214 + src/bin/cargo/command_prelude.rs | 403 ++ src/bin/cargo/commands/bench.rs | 105 + src/bin/cargo/commands/build.rs | 63 + src/bin/cargo/commands/check.rs | 72 + src/bin/cargo/commands/clean.rs | 35 + src/bin/cargo/commands/doc.rs | 59 + src/bin/cargo/commands/fetch.rs | 34 + src/bin/cargo/commands/generate_lockfile.rs | 27 + src/bin/cargo/commands/git_checkout.rs | 36 + src/bin/cargo/commands/init.rs | 19 + src/bin/cargo/commands/install.rs | 122 + src/bin/cargo/commands/locate_project.rs | 33 + src/bin/cargo/commands/login.rs | 58 + src/bin/cargo/commands/metadata.rs | 53 + src/bin/cargo/commands/mod.rs | 101 + src/bin/cargo/commands/new.rs | 26 + src/bin/cargo/commands/owner.rs | 49 + src/bin/cargo/commands/package.rs | 48 + src/bin/cargo/commands/pkgid.rs | 41 + src/bin/cargo/commands/publish.rs | 46 + src/bin/cargo/commands/read_manifest.rs | 18 + src/bin/cargo/commands/run.rs | 69 + src/bin/cargo/commands/rustc.rs | 74 + src/bin/cargo/commands/rustdoc.rs | 66 + src/bin/cargo/commands/search.rs | 30 + src/bin/cargo/commands/test.rs | 137 + src/bin/cargo/commands/uninstall.rs | 26 + src/bin/cargo/commands/update.rs | 51 + src/bin/cargo/commands/verify_project.rs | 45 + src/bin/cargo/commands/version.rs | 12 + src/bin/cargo/commands/yank.rs | 43 + src/bin/cargo/main.rs | 212 + src/cargo/core/compiler/build_config.rs | 190 + src/cargo/core/compiler/build_context/mod.rs | 452 ++ .../compiler/build_context/target_info.rs | 291 + src/cargo/core/compiler/compilation.rs | 219 + .../compiler/context/compilation_files.rs | 454 ++ src/cargo/core/compiler/context/mod.rs | 491 ++ .../compiler/context/unit_dependencies.rs | 385 ++ src/cargo/core/compiler/custom_build.rs | 623 ++ src/cargo/core/compiler/fingerprint.rs | 841 +++ src/cargo/core/compiler/job.rs | 71 + src/cargo/core/compiler/job_queue.rs | 478 ++ src/cargo/core/compiler/layout.rs | 205 + src/cargo/core/compiler/mod.rs | 940 +++ src/cargo/core/compiler/output_depinfo.rs | 125 + src/cargo/core/dependency.rs | 415 ++ src/cargo/core/features.rs | 366 ++ src/cargo/core/interning.rs | 108 + src/cargo/core/manifest.rs | 677 ++ src/cargo/core/mod.rs | 29 + src/cargo/core/package.rs | 258 + src/cargo/core/package_id.rs | 198 + src/cargo/core/package_id_spec.rs | 378 ++ src/cargo/core/profiles.rs | 485 ++ src/cargo/core/registry.rs | 607 ++ src/cargo/core/resolver/conflict_cache.rs | 97 + src/cargo/core/resolver/context.rs | 425 ++ src/cargo/core/resolver/encode.rs | 431 ++ src/cargo/core/resolver/mod.rs | 1100 ++++ src/cargo/core/resolver/resolve.rs | 292 + src/cargo/core/resolver/types.rs | 398 ++ src/cargo/core/shell.rs | 355 ++ src/cargo/core/source/mod.rs | 201 + src/cargo/core/source/source_id.rs | 569 ++ src/cargo/core/summary.rs | 394 ++ src/cargo/core/workspace.rs | 855 +++ src/cargo/lib.rs | 240 + src/cargo/ops/cargo_clean.rs | 141 + src/cargo/ops/cargo_compile.rs | 751 +++ src/cargo/ops/cargo_doc.rs | 153 + src/cargo/ops/cargo_fetch.rs | 63 + src/cargo/ops/cargo_generate_lockfile.rs | 213 + src/cargo/ops/cargo_install.rs | 819 +++ src/cargo/ops/cargo_new.rs | 672 ++ 
src/cargo/ops/cargo_output_metadata.rs | 117 + src/cargo/ops/cargo_package.rs | 386 ++ src/cargo/ops/cargo_pkgid.rs | 16 + src/cargo/ops/cargo_read_manifest.rs | 199 + src/cargo/ops/cargo_run.rs | 105 + src/cargo/ops/cargo_test.rs | 241 + src/cargo/ops/lockfile.rs | 161 + src/cargo/ops/mod.rs | 40 + src/cargo/ops/registry.rs | 622 ++ src/cargo/ops/resolve.rs | 559 ++ src/cargo/sources/config.rs | 244 + src/cargo/sources/directory.rs | 201 + src/cargo/sources/git/mod.rs | 4 + src/cargo/sources/git/source.rs | 278 + src/cargo/sources/git/utils.rs | 870 +++ src/cargo/sources/mod.rs | 13 + src/cargo/sources/path.rs | 541 ++ src/cargo/sources/registry/index.rs | 214 + src/cargo/sources/registry/local.rs | 103 + src/cargo/sources/registry/mod.rs | 498 ++ src/cargo/sources/registry/remote.rs | 289 + src/cargo/sources/replaced.rs | 73 + src/cargo/util/cfg.rs | 263 + src/cargo/util/config.rs | 1048 +++ src/cargo/util/dependency_queue.rs | 231 + src/cargo/util/errors.rs | 283 + src/cargo/util/flock.rs | 346 + src/cargo/util/graph.rs | 136 + src/cargo/util/hex.rs | 27 + src/cargo/util/important_paths.rs | 32 + src/cargo/util/job.rs | 271 + src/cargo/util/lev_distance.rs | 56 + src/cargo/util/machine_message.rs | 70 + src/cargo/util/mod.rs | 44 + src/cargo/util/network.rs | 106 + src/cargo/util/paths.rs | 292 + src/cargo/util/process_builder.rs | 345 + src/cargo/util/profile.rs | 89 + src/cargo/util/progress.rs | 136 + src/cargo/util/read2.rs | 185 + src/cargo/util/rustc.rs | 245 + src/cargo/util/sha256.rs | 23 + src/cargo/util/to_semver.rs | 33 + src/cargo/util/to_url.rs | 23 + src/cargo/util/toml/mod.rs | 1416 +++++ src/cargo/util/toml/targets.rs | 737 +++ src/cargo/util/vcs.rs | 78 + src/crates-io/Cargo.toml | 21 + src/crates-io/LICENSE-APACHE | 1 + src/crates-io/LICENSE-MIT | 1 + src/crates-io/lib.rs | 335 + src/doc/README.md | 47 + src/doc/book.toml | 2 + src/doc/src/SUMMARY.md | 32 + src/doc/src/faq.md | 193 + src/doc/src/getting-started/first-steps.md | 70 + src/doc/src/getting-started/index.md | 6 + src/doc/src/getting-started/installation.md | 37 + src/doc/src/guide/build-cache.md | 14 + src/doc/src/guide/cargo-toml-vs-cargo-lock.md | 103 + src/doc/src/guide/continuous-integration.md | 21 + src/doc/src/guide/creating-a-new-project.md | 87 + src/doc/src/guide/dependencies.md | 90 + src/doc/src/guide/index.md | 14 + src/doc/src/guide/project-layout.md | 35 + src/doc/src/guide/tests.md | 39 + src/doc/src/guide/why-cargo-exists.md | 12 + .../guide/working-on-an-existing-project.md | 22 + src/doc/src/images/Cargo-Logo-Small.png | Bin 0 -> 58168 bytes src/doc/src/images/auth-level-acl.png | Bin 0 -> 90300 bytes src/doc/src/images/org-level-acl.png | Bin 0 -> 76572 bytes src/doc/src/index.md | 30 + src/doc/src/reference/build-scripts.md | 565 ++ src/doc/src/reference/config.md | 143 + .../src/reference/environment-variables.md | 144 + src/doc/src/reference/external-tools.md | 103 + src/doc/src/reference/index.md | 14 + src/doc/src/reference/manifest.md | 830 +++ src/doc/src/reference/pkgid-spec.md | 44 + src/doc/src/reference/publishing.md | 222 + src/doc/src/reference/source-replacement.md | 134 + .../src/reference/specifying-dependencies.md | 536 ++ src/doc/src/reference/unstable.md | 256 + src/doc/theme/favicon.png | Bin 0 -> 5430 bytes src/etc/_cargo | 544 ++ src/etc/cargo.bashcomp.sh | 246 + src/etc/man/cargo-bench.1 | 143 + src/etc/man/cargo-build.1 | 132 + src/etc/man/cargo-check.1 | 132 + src/etc/man/cargo-clean.1 | 82 + src/etc/man/cargo-doc.1 | 109 + src/etc/man/cargo-fetch.1 | 52 + 
src/etc/man/cargo-generate-lockfile.1 | 41 + src/etc/man/cargo-init.1 | 68 + src/etc/man/cargo-install.1 | 161 + src/etc/man/cargo-login.1 | 41 + src/etc/man/cargo-metadata.1 | 71 + src/etc/man/cargo-new.1 | 68 + src/etc/man/cargo-owner.1 | 88 + src/etc/man/cargo-package.1 | 59 + src/etc/man/cargo-pkgid.1 | 75 + src/etc/man/cargo-publish.1 | 59 + src/etc/man/cargo-run.1 | 103 + src/etc/man/cargo-rustc.1 | 126 + src/etc/man/cargo-rustdoc.1 | 124 + src/etc/man/cargo-search.1 | 49 + src/etc/man/cargo-test.1 | 172 + src/etc/man/cargo-uninstall.1 | 56 + src/etc/man/cargo-update.1 | 80 + src/etc/man/cargo-version.1 | 31 + src/etc/man/cargo-yank.1 | 68 + src/etc/man/cargo.1 | 206 + tests/testsuite/alt_registry.rs | 590 ++ tests/testsuite/bad_config.rs | 1427 +++++ tests/testsuite/bad_manifest_path.rs | 391 ++ tests/testsuite/bench.rs | 1919 ++++++ tests/testsuite/build.rs | 5638 +++++++++++++++++ tests/testsuite/build_auth.rs | 269 + tests/testsuite/build_lib.rs | 112 + tests/testsuite/build_script.rs | 4004 ++++++++++++ tests/testsuite/build_script_env.rs | 140 + tests/testsuite/cargo_alias_config.rs | 206 + tests/testsuite/cargo_command.rs | 345 + tests/testsuite/cargo_features.rs | 327 + tests/testsuite/cargotest/install.rs | 35 + tests/testsuite/cargotest/mod.rs | 93 + .../cargotest/support/cross_compile.rs | 137 + tests/testsuite/cargotest/support/git.rs | 164 + tests/testsuite/cargotest/support/mod.rs | 1084 ++++ tests/testsuite/cargotest/support/paths.rs | 165 + tests/testsuite/cargotest/support/publish.rs | 62 + tests/testsuite/cargotest/support/registry.rs | 372 ++ tests/testsuite/cfg.rs | 492 ++ tests/testsuite/check-style.sh | 3 + tests/testsuite/check.rs | 945 +++ tests/testsuite/clean.rs | 357 ++ tests/testsuite/concurrent.rs | 639 ++ tests/testsuite/config.rs | 33 + tests/testsuite/corrupt_git.rs | 187 + tests/testsuite/cross_compile.rs | 1407 ++++ tests/testsuite/cross_publish.rs | 136 + tests/testsuite/custom_target.rs | 170 + tests/testsuite/death.rs | 146 + tests/testsuite/dep_info.rs | 126 + tests/testsuite/directory.rs | 815 +++ tests/testsuite/doc.rs | 1540 +++++ tests/testsuite/features.rs | 2072 ++++++ tests/testsuite/fetch.rs | 156 + tests/testsuite/freshness.rs | 1245 ++++ tests/testsuite/generate_lockfile.rs | 252 + tests/testsuite/git.rs | 3202 ++++++++++ tests/testsuite/hamcrest.rs | 101 + tests/testsuite/init.rs | 658 ++ tests/testsuite/install.rs | 1535 +++++ tests/testsuite/jobserver.rs | 221 + tests/testsuite/local_registry.rs | 491 ++ tests/testsuite/lockfile_compat.rs | 507 ++ tests/testsuite/login.rs | 195 + tests/testsuite/main.rs | 95 + tests/testsuite/metadata.rs | 1029 +++ tests/testsuite/net_config.rs | 75 + tests/testsuite/new.rs | 562 ++ tests/testsuite/out_dir.rs | 295 + tests/testsuite/overrides.rs | 1627 +++++ tests/testsuite/package.rs | 1426 +++++ tests/testsuite/patch.rs | 1160 ++++ tests/testsuite/path.rs | 1312 ++++ tests/testsuite/plugins.rs | 474 ++ tests/testsuite/proc_macro.rs | 301 + tests/testsuite/profile_overrides.rs | 473 ++ tests/testsuite/profile_targets.rs | 707 +++ tests/testsuite/profiles.rs | 398 ++ tests/testsuite/publish.rs | 874 +++ tests/testsuite/read_manifest.rs | 113 + tests/testsuite/registry.rs | 1802 ++++++ tests/testsuite/rename_deps.rs | 365 ++ tests/testsuite/required_features.rs | 1355 ++++ tests/testsuite/resolve.rs | 991 +++ tests/testsuite/run.rs | 1316 ++++ tests/testsuite/rustc.rs | 688 ++ tests/testsuite/rustc_info_cache.rs | 137 + tests/testsuite/rustdoc.rs | 253 + tests/testsuite/rustdocflags.rs | 163 + 
tests/testsuite/rustflags.rs | 1621 +++++ tests/testsuite/search.rs | 341 + tests/testsuite/small_fd_limits.rs | 118 + tests/testsuite/test.rs | 4140 ++++++++++++ tests/testsuite/tool_paths.rs | 256 + tests/testsuite/update.rs | 415 ++ tests/testsuite/verify_project.rs | 57 + tests/testsuite/version.rs | 51 + tests/testsuite/warn_on_failure.rs | 111 + tests/testsuite/workspaces.rs | 2370 +++++++ 279 files changed, 104221 insertions(+) create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 ARCHITECTURE.md create mode 100644 CONTRIBUTING.md create mode 100644 Cargo.toml create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT create mode 100644 LICENSE-THIRD-PARTY create mode 100644 README.md create mode 100644 appveyor.yml create mode 100644 src/bin/cargo/cli.rs create mode 100644 src/bin/cargo/command_prelude.rs create mode 100644 src/bin/cargo/commands/bench.rs create mode 100644 src/bin/cargo/commands/build.rs create mode 100644 src/bin/cargo/commands/check.rs create mode 100644 src/bin/cargo/commands/clean.rs create mode 100644 src/bin/cargo/commands/doc.rs create mode 100644 src/bin/cargo/commands/fetch.rs create mode 100644 src/bin/cargo/commands/generate_lockfile.rs create mode 100644 src/bin/cargo/commands/git_checkout.rs create mode 100644 src/bin/cargo/commands/init.rs create mode 100644 src/bin/cargo/commands/install.rs create mode 100644 src/bin/cargo/commands/locate_project.rs create mode 100644 src/bin/cargo/commands/login.rs create mode 100644 src/bin/cargo/commands/metadata.rs create mode 100644 src/bin/cargo/commands/mod.rs create mode 100644 src/bin/cargo/commands/new.rs create mode 100644 src/bin/cargo/commands/owner.rs create mode 100644 src/bin/cargo/commands/package.rs create mode 100644 src/bin/cargo/commands/pkgid.rs create mode 100644 src/bin/cargo/commands/publish.rs create mode 100644 src/bin/cargo/commands/read_manifest.rs create mode 100644 src/bin/cargo/commands/run.rs create mode 100644 src/bin/cargo/commands/rustc.rs create mode 100644 src/bin/cargo/commands/rustdoc.rs create mode 100644 src/bin/cargo/commands/search.rs create mode 100644 src/bin/cargo/commands/test.rs create mode 100644 src/bin/cargo/commands/uninstall.rs create mode 100644 src/bin/cargo/commands/update.rs create mode 100644 src/bin/cargo/commands/verify_project.rs create mode 100644 src/bin/cargo/commands/version.rs create mode 100644 src/bin/cargo/commands/yank.rs create mode 100644 src/bin/cargo/main.rs create mode 100644 src/cargo/core/compiler/build_config.rs create mode 100644 src/cargo/core/compiler/build_context/mod.rs create mode 100644 src/cargo/core/compiler/build_context/target_info.rs create mode 100644 src/cargo/core/compiler/compilation.rs create mode 100644 src/cargo/core/compiler/context/compilation_files.rs create mode 100644 src/cargo/core/compiler/context/mod.rs create mode 100644 src/cargo/core/compiler/context/unit_dependencies.rs create mode 100644 src/cargo/core/compiler/custom_build.rs create mode 100644 src/cargo/core/compiler/fingerprint.rs create mode 100644 src/cargo/core/compiler/job.rs create mode 100644 src/cargo/core/compiler/job_queue.rs create mode 100644 src/cargo/core/compiler/layout.rs create mode 100644 src/cargo/core/compiler/mod.rs create mode 100644 src/cargo/core/compiler/output_depinfo.rs create mode 100644 src/cargo/core/dependency.rs create mode 100644 src/cargo/core/features.rs create mode 100644 src/cargo/core/interning.rs create mode 100644 src/cargo/core/manifest.rs create mode 100644 src/cargo/core/mod.rs 
create mode 100644 src/cargo/core/package.rs create mode 100644 src/cargo/core/package_id.rs create mode 100644 src/cargo/core/package_id_spec.rs create mode 100644 src/cargo/core/profiles.rs create mode 100644 src/cargo/core/registry.rs create mode 100644 src/cargo/core/resolver/conflict_cache.rs create mode 100644 src/cargo/core/resolver/context.rs create mode 100644 src/cargo/core/resolver/encode.rs create mode 100644 src/cargo/core/resolver/mod.rs create mode 100644 src/cargo/core/resolver/resolve.rs create mode 100644 src/cargo/core/resolver/types.rs create mode 100644 src/cargo/core/shell.rs create mode 100644 src/cargo/core/source/mod.rs create mode 100644 src/cargo/core/source/source_id.rs create mode 100644 src/cargo/core/summary.rs create mode 100644 src/cargo/core/workspace.rs create mode 100644 src/cargo/lib.rs create mode 100644 src/cargo/ops/cargo_clean.rs create mode 100644 src/cargo/ops/cargo_compile.rs create mode 100644 src/cargo/ops/cargo_doc.rs create mode 100644 src/cargo/ops/cargo_fetch.rs create mode 100644 src/cargo/ops/cargo_generate_lockfile.rs create mode 100644 src/cargo/ops/cargo_install.rs create mode 100644 src/cargo/ops/cargo_new.rs create mode 100644 src/cargo/ops/cargo_output_metadata.rs create mode 100644 src/cargo/ops/cargo_package.rs create mode 100644 src/cargo/ops/cargo_pkgid.rs create mode 100644 src/cargo/ops/cargo_read_manifest.rs create mode 100644 src/cargo/ops/cargo_run.rs create mode 100644 src/cargo/ops/cargo_test.rs create mode 100644 src/cargo/ops/lockfile.rs create mode 100644 src/cargo/ops/mod.rs create mode 100644 src/cargo/ops/registry.rs create mode 100644 src/cargo/ops/resolve.rs create mode 100644 src/cargo/sources/config.rs create mode 100644 src/cargo/sources/directory.rs create mode 100644 src/cargo/sources/git/mod.rs create mode 100644 src/cargo/sources/git/source.rs create mode 100644 src/cargo/sources/git/utils.rs create mode 100644 src/cargo/sources/mod.rs create mode 100644 src/cargo/sources/path.rs create mode 100644 src/cargo/sources/registry/index.rs create mode 100644 src/cargo/sources/registry/local.rs create mode 100644 src/cargo/sources/registry/mod.rs create mode 100644 src/cargo/sources/registry/remote.rs create mode 100644 src/cargo/sources/replaced.rs create mode 100644 src/cargo/util/cfg.rs create mode 100644 src/cargo/util/config.rs create mode 100644 src/cargo/util/dependency_queue.rs create mode 100644 src/cargo/util/errors.rs create mode 100644 src/cargo/util/flock.rs create mode 100644 src/cargo/util/graph.rs create mode 100644 src/cargo/util/hex.rs create mode 100644 src/cargo/util/important_paths.rs create mode 100644 src/cargo/util/job.rs create mode 100644 src/cargo/util/lev_distance.rs create mode 100644 src/cargo/util/machine_message.rs create mode 100644 src/cargo/util/mod.rs create mode 100644 src/cargo/util/network.rs create mode 100644 src/cargo/util/paths.rs create mode 100644 src/cargo/util/process_builder.rs create mode 100644 src/cargo/util/profile.rs create mode 100644 src/cargo/util/progress.rs create mode 100644 src/cargo/util/read2.rs create mode 100644 src/cargo/util/rustc.rs create mode 100644 src/cargo/util/sha256.rs create mode 100644 src/cargo/util/to_semver.rs create mode 100644 src/cargo/util/to_url.rs create mode 100644 src/cargo/util/toml/mod.rs create mode 100644 src/cargo/util/toml/targets.rs create mode 100644 src/cargo/util/vcs.rs create mode 100644 src/crates-io/Cargo.toml create mode 120000 src/crates-io/LICENSE-APACHE create mode 120000 src/crates-io/LICENSE-MIT create mode 
100644 src/crates-io/lib.rs create mode 100644 src/doc/README.md create mode 100644 src/doc/book.toml create mode 100644 src/doc/src/SUMMARY.md create mode 100644 src/doc/src/faq.md create mode 100644 src/doc/src/getting-started/first-steps.md create mode 100644 src/doc/src/getting-started/index.md create mode 100644 src/doc/src/getting-started/installation.md create mode 100644 src/doc/src/guide/build-cache.md create mode 100644 src/doc/src/guide/cargo-toml-vs-cargo-lock.md create mode 100644 src/doc/src/guide/continuous-integration.md create mode 100644 src/doc/src/guide/creating-a-new-project.md create mode 100644 src/doc/src/guide/dependencies.md create mode 100644 src/doc/src/guide/index.md create mode 100644 src/doc/src/guide/project-layout.md create mode 100644 src/doc/src/guide/tests.md create mode 100644 src/doc/src/guide/why-cargo-exists.md create mode 100644 src/doc/src/guide/working-on-an-existing-project.md create mode 100644 src/doc/src/images/Cargo-Logo-Small.png create mode 100644 src/doc/src/images/auth-level-acl.png create mode 100644 src/doc/src/images/org-level-acl.png create mode 100644 src/doc/src/index.md create mode 100644 src/doc/src/reference/build-scripts.md create mode 100644 src/doc/src/reference/config.md create mode 100644 src/doc/src/reference/environment-variables.md create mode 100644 src/doc/src/reference/external-tools.md create mode 100644 src/doc/src/reference/index.md create mode 100644 src/doc/src/reference/manifest.md create mode 100644 src/doc/src/reference/pkgid-spec.md create mode 100644 src/doc/src/reference/publishing.md create mode 100644 src/doc/src/reference/source-replacement.md create mode 100644 src/doc/src/reference/specifying-dependencies.md create mode 100644 src/doc/src/reference/unstable.md create mode 100644 src/doc/theme/favicon.png create mode 100644 src/etc/_cargo create mode 100644 src/etc/cargo.bashcomp.sh create mode 100644 src/etc/man/cargo-bench.1 create mode 100644 src/etc/man/cargo-build.1 create mode 100644 src/etc/man/cargo-check.1 create mode 100644 src/etc/man/cargo-clean.1 create mode 100644 src/etc/man/cargo-doc.1 create mode 100644 src/etc/man/cargo-fetch.1 create mode 100644 src/etc/man/cargo-generate-lockfile.1 create mode 100644 src/etc/man/cargo-init.1 create mode 100644 src/etc/man/cargo-install.1 create mode 100644 src/etc/man/cargo-login.1 create mode 100644 src/etc/man/cargo-metadata.1 create mode 100644 src/etc/man/cargo-new.1 create mode 100644 src/etc/man/cargo-owner.1 create mode 100644 src/etc/man/cargo-package.1 create mode 100644 src/etc/man/cargo-pkgid.1 create mode 100644 src/etc/man/cargo-publish.1 create mode 100644 src/etc/man/cargo-run.1 create mode 100644 src/etc/man/cargo-rustc.1 create mode 100644 src/etc/man/cargo-rustdoc.1 create mode 100644 src/etc/man/cargo-search.1 create mode 100644 src/etc/man/cargo-test.1 create mode 100644 src/etc/man/cargo-uninstall.1 create mode 100644 src/etc/man/cargo-update.1 create mode 100644 src/etc/man/cargo-version.1 create mode 100644 src/etc/man/cargo-yank.1 create mode 100644 src/etc/man/cargo.1 create mode 100644 tests/testsuite/alt_registry.rs create mode 100644 tests/testsuite/bad_config.rs create mode 100644 tests/testsuite/bad_manifest_path.rs create mode 100644 tests/testsuite/bench.rs create mode 100644 tests/testsuite/build.rs create mode 100644 tests/testsuite/build_auth.rs create mode 100644 tests/testsuite/build_lib.rs create mode 100644 tests/testsuite/build_script.rs create mode 100644 tests/testsuite/build_script_env.rs create mode 100644 
tests/testsuite/cargo_alias_config.rs create mode 100644 tests/testsuite/cargo_command.rs create mode 100644 tests/testsuite/cargo_features.rs create mode 100644 tests/testsuite/cargotest/install.rs create mode 100644 tests/testsuite/cargotest/mod.rs create mode 100644 tests/testsuite/cargotest/support/cross_compile.rs create mode 100644 tests/testsuite/cargotest/support/git.rs create mode 100644 tests/testsuite/cargotest/support/mod.rs create mode 100644 tests/testsuite/cargotest/support/paths.rs create mode 100644 tests/testsuite/cargotest/support/publish.rs create mode 100644 tests/testsuite/cargotest/support/registry.rs create mode 100644 tests/testsuite/cfg.rs create mode 100755 tests/testsuite/check-style.sh create mode 100644 tests/testsuite/check.rs create mode 100644 tests/testsuite/clean.rs create mode 100644 tests/testsuite/concurrent.rs create mode 100644 tests/testsuite/config.rs create mode 100644 tests/testsuite/corrupt_git.rs create mode 100644 tests/testsuite/cross_compile.rs create mode 100644 tests/testsuite/cross_publish.rs create mode 100644 tests/testsuite/custom_target.rs create mode 100644 tests/testsuite/death.rs create mode 100644 tests/testsuite/dep_info.rs create mode 100644 tests/testsuite/directory.rs create mode 100644 tests/testsuite/doc.rs create mode 100644 tests/testsuite/features.rs create mode 100644 tests/testsuite/fetch.rs create mode 100644 tests/testsuite/freshness.rs create mode 100644 tests/testsuite/generate_lockfile.rs create mode 100644 tests/testsuite/git.rs create mode 100644 tests/testsuite/hamcrest.rs create mode 100644 tests/testsuite/init.rs create mode 100644 tests/testsuite/install.rs create mode 100644 tests/testsuite/jobserver.rs create mode 100644 tests/testsuite/local_registry.rs create mode 100644 tests/testsuite/lockfile_compat.rs create mode 100644 tests/testsuite/login.rs create mode 100644 tests/testsuite/main.rs create mode 100644 tests/testsuite/metadata.rs create mode 100644 tests/testsuite/net_config.rs create mode 100644 tests/testsuite/new.rs create mode 100644 tests/testsuite/out_dir.rs create mode 100644 tests/testsuite/overrides.rs create mode 100644 tests/testsuite/package.rs create mode 100644 tests/testsuite/patch.rs create mode 100644 tests/testsuite/path.rs create mode 100644 tests/testsuite/plugins.rs create mode 100644 tests/testsuite/proc_macro.rs create mode 100644 tests/testsuite/profile_overrides.rs create mode 100644 tests/testsuite/profile_targets.rs create mode 100644 tests/testsuite/profiles.rs create mode 100644 tests/testsuite/publish.rs create mode 100644 tests/testsuite/read_manifest.rs create mode 100644 tests/testsuite/registry.rs create mode 100644 tests/testsuite/rename_deps.rs create mode 100644 tests/testsuite/required_features.rs create mode 100644 tests/testsuite/resolve.rs create mode 100644 tests/testsuite/run.rs create mode 100644 tests/testsuite/rustc.rs create mode 100644 tests/testsuite/rustc_info_cache.rs create mode 100644 tests/testsuite/rustdoc.rs create mode 100644 tests/testsuite/rustdocflags.rs create mode 100644 tests/testsuite/rustflags.rs create mode 100644 tests/testsuite/search.rs create mode 100644 tests/testsuite/small_fd_limits.rs create mode 100644 tests/testsuite/test.rs create mode 100644 tests/testsuite/tool_paths.rs create mode 100644 tests/testsuite/update.rs create mode 100644 tests/testsuite/verify_project.rs create mode 100644 tests/testsuite/version.rs create mode 100644 tests/testsuite/warn_on_failure.rs create mode 100644 tests/testsuite/workspaces.rs diff 
--git a/.gitignore b/.gitignore new file mode 100644 index 000000000..85e363a37 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +/target +Cargo.lock +.cargo +/config.stamp +/Makefile +/config.mk +src/doc/build +src/etc/*.pyc +src/registry/target +rustc +__pycache__ +.idea/ +*.iml +*.swp diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..f03698190 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,54 @@ +language: rust +rust: stable +sudo: required +dist: trusty + +git: + depth: 1 + +# Using 'cache: cargo' to cache target/ and all of $HOME/.cargo/ +# doesn't work well: the cache is large and it takes several minutes +# to move it to and from S3. So instead we only cache the mdbook +# binary. +cache: + directories: + - $HOME/.cargo/bin/ + +matrix: + include: + - env: TARGET=x86_64-unknown-linux-gnu + ALT=i686-unknown-linux-gnu + - env: TARGET=x86_64-apple-darwin + ALT=i686-apple-darwin + os: osx + + - env: TARGET=x86_64-unknown-linux-gnu + ALT=i686-unknown-linux-gnu + rust: beta + + - env: TARGET=x86_64-unknown-linux-gnu + ALT=i686-unknown-linux-gnu + rust: nightly + install: + - mdbook --help || cargo install mdbook --force + script: + - cargo test + - cargo doc --no-deps + - (cd src/doc && mdbook build --dest-dir ../../target/doc) + + exclude: + - rust: stable + +before_script: + - rustup target add $ALT +script: + - cargo test + +notifications: + email: + on_success: never + +addons: + apt: + packages: + - gcc-multilib diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 000000000..68d06ff66 --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,129 @@ +# Cargo Architecture + +This document gives a high-level overview of Cargo internals. You may +find it useful if you want to contribute to Cargo or if you are +interested in the inner workings of Cargo. + + +## Subcommands + +Cargo is organized as a set of `clap` subcommands. All subcommands live in +the `src/bin/commands` directory. `src/bin/cargo.rs` is the entry point. + +A typical subcommand, such as `src/bin/commands/build.rs`, parses command line +options, reads the configuration files, discovers the Cargo project in +the current directory and delegates the actual implementation to one +of the functions in `src/cargo/ops/mod.rs`. This short file is a good +place to find out about most of the things that Cargo can do. + + +## Important Data Structures + +There are some important data structures which are used throughout +Cargo. + +`Config` is available almost everywhere and holds "global" +information, such as `CARGO_HOME` or configuration from +`.cargo/config` files. The `shell` method of `Config` is the entry +point for printing status messages and other info to the console. + +`Workspace` is the description of the workspace for the current +working directory. Each workspace contains at least one +`Package`. Each package corresponds to a single `Cargo.toml`, and may +define several `Target`s, such as the library, binaries, integration +tests or examples. Targets are crates (each target defines a crate +root, like `src/lib.rs` or `examples/foo.rs`) and are what is actually +compiled by `rustc`. + +A typical package defines a single library target and several +auxiliary ones. Packages are a unit of dependency in Cargo, and when +package `foo` depends on package `bar`, that means that each target +from `foo` needs the library target from `bar`. + +`PackageId` is the unique identifier of a (possibly remote) +package. It consists of three components: name, version and source +id. Source is the place where the source code for a package comes +from. Typical sources are crates.io, a git repository or a folder on +the local hard drive.
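To make the shape of these identifiers concrete, here is a minimal Rust sketch of the idea. It is illustrative only: the type and field names are assumptions, not Cargo's actual definitions, which live in `src/cargo/core/package_id.rs` and `src/cargo/core/source/source_id.rs` and use interned strings, a `semver::Version`, and a richer `SourceId`.

```rust
// Minimal sketch of the PackageId concept described above. These are
// illustrative definitions, not Cargo's real ones (see
// src/cargo/core/package_id.rs and src/cargo/core/source/source_id.rs).

/// Where the source code for a package comes from.
enum SourceKind {
    Registry, // e.g. crates.io
    Git,      // a git repository
    Path,     // a folder on the local hard drive
}

/// Identifies one source of packages: its kind plus a location.
struct SourceId {
    kind: SourceKind,
    url: String,
}

/// The unique identifier of a (possibly remote) package:
/// name, version, and source id.
struct PackageId {
    name: String,
    version: String, // a semver::Version in the real code
    source_id: SourceId,
}

fn main() {
    let id = PackageId {
        name: "serde".to_string(),
        version: "1.0.0".to_string(),
        source_id: SourceId {
            kind: SourceKind::Registry,
            url: "https://github.com/rust-lang/crates.io-index".to_string(),
        },
    };
    println!("{} v{}", id.name, id.version);
}
```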
+ +`Resolve` is the representation of a directed acyclic graph of package +dependencies, which uses `PackageId`s for nodes. This is the data +structure that is saved to the lock file. If there is no lockfile, +Cargo constructs a resolve by finding a graph of packages which +matches the declared dependency specifications according to semver. + + +## Persistence + +Cargo is a non-daemon command line application, which means that all +the information used by Cargo must be persisted on the hard drive. The +main sources of information are `Cargo.toml` and `Cargo.lock` files, +`.cargo/config` configuration files and the globally shared registry +of packages downloaded from crates.io, usually located at +`~/.cargo/registry`. See `src/sources/registry` for the specifics of +the registry storage format. + + +## Concurrency + +Cargo is mostly single-threaded. The only concurrency inside a single +instance of Cargo happens during compilation, when several instances +of `rustc` are invoked in parallel to build independent +targets. However, there can be several different instances of the Cargo +process running concurrently on the system. Cargo guarantees that this +is always safe by using file locks when accessing potentially shared +data like the registry or the target directory. + + +## Tests + +Cargo has an impressive test suite located in the `tests` folder. Most +of the tests are integration tests: a project structure with a `Cargo.toml` and +Rust source code is created in a temporary directory, the `cargo` binary +is invoked via `std::process::Command` and then stdout and stderr are +verified against the expected output. To simplify testing, several +macros of the form `[MACRO]` are used in the expected output. For +example, `[..]` matches any string and `[/]` matches `/` on Unix and +`\` on Windows. + +To see the stdout and stderr streams of the subordinate process, add a `.stream()` +call to `execs()`: + +```rust +// Before +assert_that( + p.cargo("run"), + execs().with_status(0) +); + +// After +assert_that( + p.cargo("run"), + execs().stream().with_status(0) +); +``` + +Alternatively, to build and run a custom version of Cargo, simply run `cargo build` +and execute `target/debug/cargo`. + +Because the test suite has `#![deny(warnings)]`, at times you might find it +convenient to override this with `RUSTFLAGS`, for example +`RUSTFLAGS="--cap-lints warn" cargo build`.
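As a concrete illustration of such an output-matching test, the sketch below builds a throwaway project and asserts on its stderr using the `[..]` placeholder. The helpers (`project`, `execs`, `assert_that`) are the `cargotest` and `hamcrest` support APIs referenced in this document, but treat the exact signatures and expected strings as assumptions rather than a verbatim test from the suite.

```rust
use cargotest::support::{execs, project};
use hamcrest::assert_that;

#[test]
fn build_emits_expected_output() {
    // Build a throwaway project in a temporary directory.
    let p = project("foo")
        .file(
            "Cargo.toml",
            r#"
                [package]
                name = "foo"
                version = "0.0.1"
                authors = []
            "#,
        )
        .file("src/main.rs", "fn main() {}")
        .build();

    // `[..]` matches any string, so the assertion stays stable across
    // absolute paths and timing differences.
    assert_that(
        p.cargo("build"),
        execs().with_status(0).with_stderr(
            "\
[COMPILING] foo v0.0.1 ([..])
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
        ),
    );
}
```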
+ +## Logging + +Cargo uses [`env_logger`](https://docs.rs/env_logger/*/env_logger/), so you can set +the `RUST_LOG` environment variable to get logs. This is useful both for diagnosing +bugs in stable Cargo and for local development. Cargo also has an internal hierarchical +profiling infrastructure, which is activated via the `CARGO_PROFILE` variable: + +``` +# Outputs all logs with levels debug and higher +$ RUST_LOG=debug cargo generate-lockfile + +# Don't forget that you can filter by module as well +$ RUST_LOG=cargo::core::resolver=trace cargo generate-lockfile + +# Output first three levels of profiling info +$ CARGO_PROFILE=3 cargo generate-lockfile +``` diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..fc5a590b2 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,183 @@ +# Contributing to Cargo + +Thank you for your interest in contributing to Cargo! Good places to +start are this document, [ARCHITECTURE.md](ARCHITECTURE.md), which +describes the high-level structure of Cargo, and [E-easy] bugs on the +issue tracker. + +If you have a general question about Cargo or its internals, feel free to ask +on [IRC]. + +## Code of Conduct + +All contributors are expected to follow our [Code of Conduct]. + +## Bug reports + +We can't fix what we don't know about, so please report problems liberally. This +includes problems with understanding the documentation, unhelpful error messages, +and unexpected behavior. + +**If you think that you have identified an issue with Cargo that might compromise +its users' security, please do not open a public issue on GitHub. Instead, +we ask you to refer to Rust's [security policy].** + +Opening an issue is as easy as following [this link][new-issues] and filling out +the fields. Here's a template that you can use to file an issue, though it's not +necessary to use it exactly: + + + + I tried this: + + I expected to see this happen: + + Instead, this happened: + + I'm using + +All three components are important: what you did, what you expected, what +happened instead. Please use https://gist.github.com/ if your examples run long. + +## Working on issues + +If you're looking for somewhere to start, check out the [E-easy][E-Easy] and +[E-mentor][E-mentor] tags. + +Feel free to ask for guidelines on how to tackle a problem on [IRC] or open a +[new issue][new-issues]. This is especially important if you want to add new +features to Cargo or make large changes to the existing codebase. +Cargo's core developers will do their best to provide help. + +If you start working on an already-filed issue, post a comment on this issue to +let people know that somebody is working on it. Feel free to ask for comments if +you are unsure about the solution you would like to submit. + +While Cargo does make use of some Rust features available only through the +`nightly` toolchain, it must compile on stable Rust. Code added to Cargo +is encouraged to make use of the latest stable features of the language and +`stdlib`. + +We use the "fork and pull" model [described here][development-models], where +contributors push changes to their personal fork and create pull requests to +bring those changes into the source repository. This process is partly +automated: pull requests are made against Cargo's master branch, tested and +reviewed. Once a change is approved to be merged, a friendly bot merges the +changes into an internal branch, runs the full test suite on that branch +and only then merges into master. This ensures that Cargo's master branch +passes the test suite at all times. + +Your basic steps to get going: + +* Fork Cargo and create a branch from master for the issue you are working on. +* Please adhere to the code style that you see around the location you are +working on. +* [Commit as you go][githelp]. +* Include tests that cover all non-trivial code. The existing tests +in `tests/` provide templates on how to test Cargo's behavior in a +sandbox environment. The internal crate `cargotest` provides a large number +of helpers to minimize boilerplate. +* Make sure `cargo test` passes.
If you do not have the cross-compilers +installed locally, install them using the instructions returned by +`cargo test cross_compile::cross_tests` (twice, with `--toolchain nightly` +added to get the nightly cross target too); alternatively just +ignore the cross-compile test failures or disable them by +using `CFG_DISABLE_CROSS_TESTS=1 cargo test`. Note that some tests are enabled +only on the `nightly` toolchain. If you can, test both toolchains. +* All code changes are expected to comply with the formatting suggested by `rustfmt`. +You can use `rustup +stable component add rustfmt-preview` to install `rustfmt` and use +`rustfmt +stable --skip-children $FILE` on the changed files to automatically format your code. +* Push your commits to GitHub and create a pull request against Cargo's +`master` branch. + +## Pull requests + +After the pull request is made, a friendly bot will automatically assign a +reviewer; the review process will make sure that the proposed changes are +sound. Please give the assigned reviewer sufficient time, especially during +weekends. If you don't get a reply, you may poke the core developers on [IRC]. + +A merge of Cargo's master branch and your changes is immediately queued +to be tested after the pull request is made. In case unforeseen +problems are discovered during this step (e.g. a failure on a platform you +originally did not develop on), you may ask for guidance. Push additional +commits to your branch to tackle these problems. + +The reviewer might point out changes deemed necessary. Please add them as +extra commits; this ensures that the reviewer can see what has changed since +the code was previously reviewed. Large or tricky changes may require several +passes of review and changes. + +Once the reviewer approves your pull request, a friendly bot picks it up +and [merges][mergequeue] it into Cargo's `master` branch. + +## Contributing to the documentation + +To contribute to the documentation, all you need to do is change the markdown +files in the `src/doc` directory. To view the rendered version of changes you +have made locally, make sure you have `mdbook` installed and run: + +```sh +cd src/doc +mdbook build +open book/index.html +``` + +To install `mdbook` run `cargo install mdbook`. + + +## Issue Triage + +Sometimes an issue will stay open, even though the bug has been fixed. And +sometimes, the original bug may go stale because something has changed in the +meantime. + +It can be helpful to go through older bug reports and make sure that they are +still valid. Load up an older issue, double check that it's still true, and +leave a comment letting us know if it is or is not. The [least recently +updated sort][lru] is good for finding issues like this. + +Contributors with sufficient permissions on the Rust repository can help by +adding labels to triage issues: + +* Yellow, **A**-prefixed labels state which **area** of the project an issue + relates to. + +* Magenta, **B**-prefixed labels identify bugs which are **blockers**. + +* Light purple, **C**-prefixed labels represent the **category** of an issue. + +* Dark purple, **Command**-prefixed labels mean the issue has to do with a + specific cargo command. + +* Green, **E**-prefixed labels explain the level of **experience** or + **effort** necessary to fix the issue. [**E-mentor**][E-mentor] issues also + have some instructions on how to get started. + +* Red, **I**-prefixed labels indicate the **importance** of the issue.
The + **[I-nominated][]** label indicates that an issue has been nominated for + prioritizing at the next triage meeting. + +* Purple gray, **O**-prefixed labels are the **operating system** or platform + that this issue is specific to. + +* Orange, **P**-prefixed labels indicate a bug's **priority**. These labels + are only assigned during triage meetings and replace the **[I-nominated][]** + label. + +* The light orange **relnotes** label marks issues that should be documented in + the release notes of the next release. + + +[githelp]: https://dont-be-afraid-to-commit.readthedocs.io/en/latest/git/commandlinegit.html +[development-models]: https://help.github.com/articles/about-collaborative-development-models/ +[gist]: https://gist.github.com/ +[new-issues]: https://github.com/rust-lang/cargo/issues/new +[mergequeue]: https://buildbot2.rust-lang.org/homu/queue/cargo +[security policy]: https://www.rust-lang.org/security.html +[lru]: https://github.com/rust-lang/cargo/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-asc +[E-easy]: https://github.com/rust-lang/cargo/labels/E-easy +[E-mentor]: https://github.com/rust-lang/cargo/labels/E-mentor +[I-nominated]: https://github.com/rust-lang/cargo/labels/I-nominated +[Code of Conduct]: https://www.rust-lang.org/conduct.html +[IRC]: https://kiwiirc.com/client/irc.mozilla.org/cargo diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..5217104eb --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,92 @@ +[package] +name = "cargo" +version = "0.28.0" +authors = ["Yehuda Katz ", + "Carl Lerche ", + "Alex Crichton "] +license = "MIT OR Apache-2.0" +homepage = "https://crates.io" +repository = "https://github.com/rust-lang/cargo" +documentation = "https://docs.rs/cargo" +description = """ +Cargo, a package manager for Rust. 
+""" + +[lib] +name = "cargo" +path = "src/cargo/lib.rs" + +[dependencies] +atty = "0.2" +crates-io = { path = "src/crates-io", version = "0.16" } +crossbeam = "0.3" +crypto-hash = "0.3.1" +curl = "0.4.12" +env_logger = "0.5.4" +failure = "0.1.1" +filetime = "0.2" +flate2 = "1.0" +fs2 = "0.4" +git2 = "0.7.0" +git2-curl = "0.8.1" +glob = "0.2.11" +hex = "0.3" +home = "0.3" +ignore = "0.4" +lazy_static = "1.0.0" +jobserver = "0.1.9" +lazycell = "0.6" +libc = "0.2" +libgit2-sys = "0.7.1" +log = "0.4" +num_cpus = "1.0" +same-file = "1" +semver = { version = "0.9.0", features = ["serde"] } +serde = "1.0" +serde_derive = "1.0" +serde_ignored = "0.0.4" +serde_json = "1.0" +shell-escape = "0.1" +tar = { version = "0.4.15", default-features = false } +tempfile = "3.0" +termcolor = "0.3" +toml = "0.4.2" +url = "1.1" +clap = "2.31.2" + +# Not actually needed right now but required to make sure that rls/cargo build +# with the same set of features in rust-lang/rust +num-traits = "0.2" # enable the default feature + +[target.'cfg(target_os = "macos")'.dependencies] +core-foundation = { version = "0.5.1", features = ["mac_os_10_7_support"] } + +[target.'cfg(windows)'.dependencies] +miow = "0.3" + +[target.'cfg(windows)'.dependencies.winapi] +version = "0.3" +features = [ + "handleapi", + "jobapi", + "jobapi2", + "minwindef", + "ntdef", + "ntstatus", + "processenv", + "processthreadsapi", + "psapi", + "synchapi", + "winerror", + "winbase", + "wincon", + "winnt", +] + +[dev-dependencies] +bufstream = "0.1" + +[[bin]] +name = "cargo" +test = false +doc = false diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/LICENSE-THIRD-PARTY b/LICENSE-THIRD-PARTY new file mode 100644 index 000000000..c9897b96f --- /dev/null +++ b/LICENSE-THIRD-PARTY @@ -0,0 +1,1272 @@ +The Cargo source code itself does not bundle any third party libraries, but it +depends on a number of libraries which carry their own copyright notices and +license terms. These libraries are normally all linked static into the binary +distributions of Cargo: + +* OpenSSL - http://www.openssl.org/source/license.html + + Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + 3. All advertising materials mentioning features or use of this + software must display the following acknowledgment: + "This product includes software developed by the OpenSSL Project + for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + + 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + endorse or promote products derived from this software without + prior written permission. For written permission, please contact + openssl-core@openssl.org. + + 5. Products derived from this software may not be called "OpenSSL" + nor may "OpenSSL" appear in their names without prior written + permission of the OpenSSL Project. + + 6. Redistributions of any form whatsoever must retain the following + acknowledgment: + "This product includes software developed by the OpenSSL Project + for use in the OpenSSL Toolkit (http://www.openssl.org/)" + + THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + OF THE POSSIBILITY OF SUCH DAMAGE. + ==================================================================== + + This product includes cryptographic software written by Eric Young + (eay@cryptsoft.com). This product includes software written by Tim + Hudson (tjh@cryptsoft.com). + + --- + + Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + All rights reserved. + + This package is an SSL implementation written + by Eric Young (eay@cryptsoft.com). + The implementation was written so as to conform with Netscapes SSL. 
+ + This library is free for commercial and non-commercial use as long as + the following conditions are aheared to. The following conditions + apply to all code found in this distribution, be it the RC4, RSA, + lhash, DES, etc., code; not just the SSL code. The SSL documentation + included with this distribution is covered by the same copyright terms + except that the holder is Tim Hudson (tjh@cryptsoft.com). + + Copyright remains Eric Young's, and as such any Copyright notices in + the code are not to be removed. + If this package is used in a product, Eric Young should be given attribution + as the author of the parts of the library used. + This can be in the form of a textual message at program startup or + in documentation (online or textual) provided with the package. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. All advertising materials mentioning features or use of this software + must display the following acknowledgement: + "This product includes cryptographic software written by + Eric Young (eay@cryptsoft.com)" + The word 'cryptographic' can be left out if the rouines from the library + being used are not cryptographic related :-). + 4. If you include any Windows specific code (or a derivative thereof) from + the apps directory (application code) you must include an acknowledgement: + "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + + THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. + + The licence and distribution terms for any publically available version or + derivative of this code cannot be changed. i.e. this code cannot simply be + copied and put under another distribution licence + [including the GNU Public Licence.] + +* libgit2 - https://github.com/libgit2/libgit2/blob/master/COPYING + + libgit2 is Copyright (C) the libgit2 contributors, + unless otherwise stated. See the AUTHORS file for details. + + Note that the only valid version of the GPL as far as this project + is concerned is _this_ particular version of the license (ie v2, not + v2.2 or v3.x or whatever), unless explicitly otherwise stated. 
+ + ---------------------------------------------------------------------- + + LINKING EXCEPTION + + In addition to the permissions in the GNU General Public License, + the authors give you unlimited permission to link the compiled + version of this library into combinations with other programs, + and to distribute those combinations without any restriction + coming from the use of this file. (The General Public License + restrictions do apply in other respects; for example, they cover + modification of the file, and distribution when not linked into + a combined executable.) + + ---------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + License is intended to guarantee your freedom to share and change free + software--to make sure the software is free for all its users. This + General Public License applies to most of the Free Software + Foundation's software and to any other program whose authors commit to + using it. (Some other Free Software Foundation software is covered by + the GNU Library General Public License instead.) You can apply it to + your programs, too. + + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + this service if you wish), that you receive source code or can get it + if you want it, that you can change the software or use pieces of it + in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid + anyone to deny you these rights or to ask you to surrender the rights. + These restrictions translate to certain responsibilities for you if you + distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must give the recipients all the rights that + you have. You must make sure that they, too, receive or can get the + source code. And you must show them these terms so they know their + rights. + + We protect your rights with two steps: (1) copyright the software, and + (2) offer you this license which gives you legal permission to copy, + distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain + that everyone understands that there is no warranty for this free + software. If the software is modified by someone else and passed on, we + want its recipients to know that what they have is not the original, so + that any problems introduced by others will not reflect on the original + authors' reputations. + + Finally, any free program is threatened constantly by software + patents. We wish to avoid the danger that redistributors of a free + program will individually obtain patent licenses, in effect making the + program proprietary. To prevent this, we have made it clear that any + patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and + modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains + a notice placed by the copyright holder saying it may be distributed + under the terms of this General Public License. The "Program", below, + refers to any such program or work, and a "work based on the Program" + means either the Program or any derivative work under copyright law: + that is to say, a work containing the Program or a portion of it, + either verbatim or with modifications and/or translated into another + language. (Hereinafter, translation is included without limitation in + the term "modification".) Each licensee is addressed as "you". + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running the Program is not restricted, and the output from the Program + is covered only if its contents constitute a work based on the + Program (independent of having been made by running the Program). + Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's + source code as you receive it, in any medium, provided that you + conspicuously and appropriately publish on each copy an appropriate + copyright notice and disclaimer of warranty; keep intact all the + notices that refer to this License and to the absence of any warranty; + and give any other recipients of the Program a copy of this License + along with the Program. + + You may charge a fee for the physical act of transferring a copy, and + you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion + of it, thus forming a work based on the Program, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Program, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. 
But when you + distribute the same sections as part of a whole which is a work based + on the Program, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Program. + + In addition, mere aggregation of another work not based on the Program + with the Program (or with a work based on the Program) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, + under Section 2) in object code or executable form under the terms of + Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + + The source code for a work means the preferred form of the work for + making modifications to it. For an executable work, complete source + code means all the source code for all modules it contains, plus any + associated interface definition files, plus the scripts used to + control compilation and installation of the executable. However, as a + special exception, the source code distributed need not include + anything that is normally distributed (in either source or binary + form) with the major components (compiler, kernel, and so on) of the + operating system on which the executable runs, unless that component + itself accompanies the executable. + + If distribution of executable or object code is made by offering + access to copy from a designated place, then offering equivalent + access to copy the source code from the same place counts as + distribution of the source code, even though third parties are not + compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense or distribute the Program is + void, and will automatically terminate your rights under this License. + However, parties who have received copies, or rights, from you under + this License will not have their licenses terminated so long as such + parties remain in full compliance. + + 5. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Program or its derivative works. 
These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Program (or any work based on the + Program), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the + Program), the recipient automatically receives a license from the + original licensor to copy, distribute or modify the Program subject to + these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties to + this License. + + 7. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Program at all. For example, if a patent + license would not permit royalty-free redistribution of the Program by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Program. + + If any portion of this section is held invalid or unenforceable under + any particular circumstance, the balance of the section is intended to + apply and the section as a whole is intended to apply in other + circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system, which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Program under this License + may add an explicit geographical distribution limitation excluding + those countries, so that distribution is permitted only in or among + countries not thus excluded. In such case, this License incorporates + the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions + of the General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. 
If the Program
+ specifies a version number of this License which applies to it and "any
+ later version", you have the option of following the terms and conditions
+ either of that version or of any later version published by the Free
+ Software Foundation. If the Program does not specify a version number of
+ this License, you may choose any version ever published by the Free Software
+ Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+ programs whose distribution conditions are different, write to the author
+ to ask for permission. For software which is copyrighted by the Free
+ Software Foundation, write to the Free Software Foundation; we sometimes
+ make exceptions for this. Our decision will be guided by the two goals
+ of preserving the free status of all derivatives of our free software and
+ of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+ FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+ OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+ PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+ OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+ TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+ PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+ REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+ REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+ INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+ OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+ TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+ YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+ PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ convey the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If the program is interactive, make it output a short notice like this
+ when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+ The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License. Of course, the commands you use may
+ be called something other than `show w' and `show c'; they could even be
+ mouse-clicks or menu items--whatever suits your program.
+
+ You should also get your employer (if you work as a programmer) or your
+ school, if any, to sign a "copyright disclaimer" for the program, if
+ necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+ This General Public License does not permit incorporating your program into
+ proprietary programs. If your program is a subroutine library, you may
+ consider it more useful to permit linking proprietary applications with the
+ library. If this is what you want to do, use the GNU Library General
+ Public License instead of this License.
+
+ ----------------------------------------------------------------------
+
+ The bundled ZLib code is licensed under the ZLib license:
+
+ Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+ ----------------------------------------------------------------------
+
+ The Clar framework is licensed under the MIT license:
+
+ Copyright (C) 2011 by Vicent Marti
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + ---------------------------------------------------------------------- + + The regex library (deps/regex/) is licensed under the GNU LGPL + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + [This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + Licenses are intended to guarantee your freedom to share and change + free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some + specially designated software packages--typically libraries--of the + Free Software Foundation and other authors who decide to use it. You + can use it too, but we suggest you first think carefully about whether + this license or the ordinary General Public License is the better + strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, + not price. Our General Public Licenses are designed to make sure that + you have the freedom to distribute copies of free software (and charge + for this service if you wish); that you receive source code or can get + it if you want it; that you can change the software and use pieces of + it in new free programs; and that you are informed that you can do + these things. + + To protect your rights, we need to make restrictions that forbid + distributors to deny you these rights or to ask you to surrender these + rights. These restrictions translate to certain responsibilities for + you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis + or for a fee, you must give the recipients all the rights that we gave + you. You must make sure that they, too, receive or can get the source + code. If you link other code with the library, you must provide + complete object files to the recipients, so that they can relink them + with the library after making changes to the library and recompiling + it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the + library, and (2) we offer you this license, which gives you legal + permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that + there is no warranty for the free library. 
Also, if the library is + modified by someone else and passed on, the recipients should know + that what they have is not the original version, so that the original + author's reputation will not be affected by problems that might be + introduced by others. + + Finally, software patents pose a constant threat to the existence of + any free program. We wish to make sure that a company cannot + effectively restrict the users of a free program by obtaining a + restrictive license from a patent holder. Therefore, we insist that + any patent license obtained for a version of the library must be + consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the + ordinary GNU General Public License. This license, the GNU Lesser + General Public License, applies to certain designated libraries, and + is quite different from the ordinary General Public License. We use + this license for certain libraries in order to permit linking those + libraries into non-free programs. + + When a program is linked with a library, whether statically or using + a shared library, the combination of the two is legally speaking a + combined work, a derivative of the original library. The ordinary + General Public License therefore permits such linking only if the + entire combination fits its criteria of freedom. The Lesser General + Public License permits more lax criteria for linking other code with + the library. + + We call this license the "Lesser" General Public License because it + does Less to protect the user's freedom than the ordinary General + Public License. It also provides other free software developers Less + of an advantage over competing non-free programs. These disadvantages + are the reason we use the ordinary General Public License for many + libraries. However, the Lesser license provides advantages in certain + special circumstances. + + For example, on rare occasions, there may be a special need to + encourage the widest possible use of a certain library, so that it becomes + a de-facto standard. To achieve this, non-free programs must be + allowed to use the library. A more frequent case is that a free + library does the same job as widely used non-free libraries. In this + case, there is little to gain by limiting the free library to free + software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free + programs enables a greater number of people to use a large body of + free software. For example, permission to use the GNU C Library in + non-free programs enables many more people to use the whole GNU + operating system, as well as its variant, the GNU/Linux operating + system. + + Although the Lesser General Public License is Less protective of the + users' freedom, it does ensure that the user of a program that is + linked with the Library has the freedom and the wherewithal to run + that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and + modification follow. Pay close attention to the difference between a + "work based on the library" and a "work that uses the library". The + former contains code derived from the library, whereas the latter must + be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library or other + program which contains a notice placed by the copyright holder or + other authorized party saying it may be distributed under the terms of + this Lesser General Public License (also called "this License"). + Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data + prepared so as to be conveniently linked with application programs + (which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work + which has been distributed under these terms. A "work based on the + Library" means either the Library or any derivative work under + copyright law: that is to say, a work containing the Library or a + portion of it, either verbatim or with modifications and/or translated + straightforwardly into another language. (Hereinafter, translation is + included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for + making modifications to it. For a library, complete source code means + all the source code for all modules it contains, plus any associated + interface definition files, plus the scripts used to control compilation + and installation of the library. + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running a program using the Library is not restricted, and output from + such a program is covered only if its contents constitute a work based + on the Library (independent of the use of the Library in a tool for + writing it). Whether that is true depends on what the Library does + and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's + complete source code as you receive it, in any medium, provided that + you conspicuously and appropriately publish on each copy an + appropriate copyright notice and disclaimer of warranty; keep intact + all the notices that refer to this License and to the absence of any + warranty; and distribute a copy of this License along with the + Library. + + You may charge a fee for the physical act of transferring a copy, + and you may at your option offer warranty protection in exchange for a + fee. + + 2. You may modify your copy or copies of the Library or any portion + of it, thus forming a work based on the Library, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. 
Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Library, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Library, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote + it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Library. + + In addition, mere aggregation of another work not based on the Library + with the Library (or with a work based on the Library) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public + License instead of this License to a given copy of the Library. To do + this, you must alter all the notices that refer to this License, so + that they refer to the ordinary GNU General Public License, version 2, + instead of to this License. (If a newer version than version 2 of the + ordinary GNU General Public License has appeared, then you can specify + that version instead if you wish.) Do not make any other change in + these notices. + + Once this change is made in a given copy, it is irreversible for + that copy, so the ordinary GNU General Public License applies to all + subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of + the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or + derivative of it, under Section 2) in object code or executable form + under the terms of Sections 1 and 2 above provided that you accompany + it with the complete corresponding machine-readable source code, which + must be distributed under the terms of Sections 1 and 2 above on a + medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy + from a designated place, then offering equivalent access to copy the + source code from the same place satisfies the requirement to + distribute the source code, even though third parties are not + compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the + Library, but is designed to work with the Library by being compiled or + linked with it, is called a "work that uses the Library". Such a + work, in isolation, is not a derivative work of the Library, and + therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library + creates an executable that is a derivative of the Library (because it + contains portions of the Library), rather than a "work that uses the + library". The executable is therefore covered by this License. 
+ Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file + that is part of the Library, the object code for the work may be a + derivative work of the Library even though the source code is not. + Whether this is true is especially significant if the work can be + linked without the Library, or if the work is itself a library. The + threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data + structure layouts and accessors, and small macros and small inline + functions (ten lines or less in length), then the use of the object + file is unrestricted, regardless of whether it is legally a derivative + work. (Executables containing this object code plus portions of the + Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may + distribute the object code for the work under the terms of Section 6. + Any executables containing that work also fall under Section 6, + whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or + link a "work that uses the Library" with the Library to produce a + work containing portions of the Library, and distribute that work + under terms of your choice, provided that the terms permit + modification of the work for the customer's own use and reverse + engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the + Library is used in it and that the Library and its use are covered by + this License. You must supply a copy of this License. If the work + during execution displays copyright notices, you must include the + copyright notice for the Library among them, as well as a reference + directing the user to the copy of this License. Also, you must do one + of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. 
+ + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the + Library" must include any data and utility programs needed for + reproducing the executable from it. However, as a special exception, + the materials to be distributed need not include anything that is + normally distributed (in either source or binary form) with the major + components (compiler, kernel, and so on) of the operating system on + which the executable runs, unless that component itself accompanies + the executable. + + It may happen that this requirement contradicts the license + restrictions of other proprietary libraries that do not normally + accompany the operating system. Such a contradiction means you cannot + use both them and the Library together in an executable that you + distribute. + + 7. You may place library facilities that are a work based on the + Library side-by-side in a single library together with other library + facilities not covered by this License, and distribute such a combined + library, provided that the separate distribution of the work based on + the Library and of the other library facilities is otherwise + permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute + the Library except as expressly provided under this License. Any + attempt otherwise to copy, modify, sublicense, link with, or + distribute the Library is void, and will automatically terminate your + rights under this License. However, parties who have received copies, + or rights, from you under this License will not have their licenses + terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Library or its derivative works. These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Library (or any work based on the + Library), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the + Library), the recipient automatically receives a license from the + original licensor to copy, distribute, link with or modify the Library + subject to these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties with + this License. + + 11. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. 
If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Library at all. For example, if a patent + license would not permit royalty-free redistribution of the Library by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Library. + + If any portion of this section is held invalid or unenforceable under any + particular circumstance, the balance of the section is intended to apply, + and the section as a whole is intended to apply in other circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Library under this License may add + an explicit geographical distribution limitation excluding those countries, + so that distribution is permitted only in or among countries not thus + excluded. In such case, this License incorporates the limitation as if + written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new + versions of the Lesser General Public License from time to time. + Such new versions will be similar in spirit to the present version, + but may differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the Library + specifies a version number of this License which applies to it and + "any later version", you have the option of following the terms and + conditions either of that version or of any later version published by + the Free Software Foundation. If the Library does not specify a + license version number, you may choose any version ever published by + the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free + programs whose distribution conditions are incompatible with these, + write to the author to ask for permission. For software which is + copyrighted by the Free Software Foundation, write to the Free + Software Foundation; we sometimes make exceptions for this. Our + decision will be guided by the two goals of preserving the free status + of all derivatives of our free software and of promoting the sharing + and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO + WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
+
+ EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+ OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+ KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+ LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+ THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+ WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+ AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+ FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+ CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+ LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+ RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+ possible use to the public, we recommend making it free software that
+ everyone can redistribute and change. You can do so by permitting
+ redistribution under these terms (or, alternatively, under the terms of the
+ ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+ safest to attach them to the start of each source file to most effectively
+ convey the exclusion of warranty; and each file should have at least the
+ "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ You should also get your employer (if you work as a programmer) or your
+ school, if any, to sign a "copyright disclaimer" for the library, if
+ necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+ That's all there is to it!
+
+ ----------------------------------------------------------------------
+
+* libssh2 - http://www.libssh2.org/license.html
+
+ Copyright (c) 2004-2007 Sara Golemon
+ Copyright (c) 2005,2006 Mikhail Gusarov
+ Copyright (c) 2006-2007 The Written Word, Inc.
+ Copyright (c) 2007 Eli Fant
+ Copyright (c) 2009 Daniel Stenberg
+ Copyright (C) 2008, 2009 Simon Josefsson
+ All rights reserved.
+ + Redistribution and use in source and binary forms, + with or without modification, are permitted provided + that the following conditions are met: + + Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + Neither the name of the copyright holder nor the names + of any other contributors may be used to endorse or + promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + OF SUCH DAMAGE. + +* libcurl - http://curl.haxx.se/docs/copyright.html + + COPYRIGHT AND PERMISSION NOTICE + + Copyright (c) 1996 - 2014, Daniel Stenberg, daniel@haxx.se. + + All rights reserved. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name of a copyright holder shall not + be used in advertising or otherwise to promote the sale, use or other + dealings in this Software without prior written authorization of the + copyright holder. 
+ +* flate2-rs - https://github.com/alexcrichton/flate2-rs/blob/master/LICENSE-MIT +* link-config - https://github.com/alexcrichton/link-config/blob/master/LICENSE-MIT +* openssl-static-sys - https://github.com/alexcrichton/openssl-static-sys/blob/master/LICENSE-MIT +* toml-rs - https://github.com/alexcrichton/toml-rs/blob/master/LICENSE-MIT +* libssh2-static-sys - https://github.com/alexcrichton/libssh2-static-sys/blob/master/LICENSE-MIT +* git2-rs - https://github.com/alexcrichton/git2-rs/blob/master/LICENSE-MIT +* tar-rs - https://github.com/alexcrichton/tar-rs/blob/master/LICENSE-MIT + + Copyright (c) 2014 Alex Crichton + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* glob - https://github.com/rust-lang/glob/blob/master/LICENSE-MIT +* semver - https://github.com/rust-lang/semver/blob/master/LICENSE-MIT + + Copyright (c) 2014 The Rust Project Developers + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +* rust-url - https://github.com/servo/rust-url/blob/master/LICENSE-MIT + + Copyright (c) 2006-2009 Graydon Hoare + Copyright (c) 2009-2013 Mozilla Foundation + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* rust-encoding - https://github.com/lifthrasiir/rust-encoding/blob/master/LICENSE.txt + + The MIT License (MIT) + + Copyright (c) 2013, Kang Seonghoon. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +* curl-rust - https://github.com/carllerche/curl-rust/blob/master/LICENSE + + Copyright (c) 2014 Carl Lerche + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+  THE SOFTWARE.
+
+* docopt.rs - https://github.com/docopt/docopt.rs/blob/master/UNLICENSE
+
+  This is free and unencumbered software released into the public domain.
+
+  Anyone is free to copy, modify, publish, use, compile, sell, or
+  distribute this software, either in source code form or as a compiled
+  binary, for any purpose, commercial or non-commercial, and by any
+  means.
+
+  In jurisdictions that recognize copyright laws, the author or authors
+  of this software dedicate any and all copyright interest in the
+  software to the public domain. We make this dedication for the benefit
+  of the public at large and to the detriment of our heirs and
+  successors. We intend this dedication to be an overt act of
+  relinquishment in perpetuity of all present and future rights to this
+  software under copyright law.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+  IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  OTHER DEALINGS IN THE SOFTWARE.
+
+  For more information, please refer to <http://unlicense.org/>
+
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..ed8ca9791
--- /dev/null
+++ b/README.md
@@ -0,0 +1,90 @@
+# Cargo
+
+Cargo downloads your Rust project’s dependencies and compiles your project.
+
+Learn more at https://doc.rust-lang.org/cargo/
+
+## Code Status
+
+[![Build Status](https://travis-ci.org/rust-lang/cargo.svg?branch=master)](https://travis-ci.org/rust-lang/cargo)
+[![Build Status](https://ci.appveyor.com/api/projects/status/github/rust-lang/cargo?branch=master&svg=true)](https://ci.appveyor.com/project/rust-lang-libs/cargo)
+
+Code documentation: https://docs.rs/cargo/
+
+## Installing Cargo
+
+Cargo is distributed by default with Rust, so if you've got `rustc` installed
+locally, you probably also have `cargo` installed locally.
+
+## Compiling from Source
+
+Cargo requires the following tools and packages to build:
+
+* `python`
+* `curl` (on Unix)
+* `cmake`
+* OpenSSL headers (only for Unix, this is the `libssl-dev` package on Ubuntu)
+* `cargo` and `rustc`
+
+First, you'll want to check out this repository:
+
+```
+git clone https://github.com/rust-lang/cargo
+cd cargo
+```
+
+With `cargo` already installed, you can simply run:
+
+```
+cargo build --release
+```
+
+## Adding new subcommands to Cargo
+
+Cargo is designed to be extensible with new subcommands without having to modify
+Cargo itself. See [the Wiki page][third-party-subcommands] for more details and
+a list of known community-developed subcommands. A minimal sketch of such a
+subcommand appears further down in this README.
+
+[third-party-subcommands]: https://github.com/rust-lang/cargo/wiki/Third-party-cargo-subcommands
+
+
+## Releases
+
+High-level release notes are available as part of [Rust's release notes][rel].
+Cargo releases coincide with Rust releases.
+
+[rel]: https://github.com/rust-lang/rust/blob/master/RELEASES.md
+
+## Reporting issues
+
+Found a bug? We'd love to know about it!
+
+Please report all issues on the GitHub [issue tracker][issues].
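+As a sketch of the subcommand mechanism mentioned above: any executable named
+`cargo-foo` found on `PATH` can be invoked as `cargo foo`, with the remaining
+arguments forwarded to it. The crate name `cargo-hello` below is hypothetical,
+purely for illustration:
+
+```
+// src/main.rs of a hypothetical `cargo-hello` crate.
+// Cargo invokes it as `cargo-hello hello [args...]`, so the
+// user's extra arguments start at index 2 of argv.
+use std::env;
+
+fn main() {
+    let args: Vec<String> = env::args().skip(2).collect();
+    println!("Hello from an external cargo subcommand! args: {:?}", args);
+}
+```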
+ +[issues]: https://github.com/rust-lang/cargo/issues + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md). You may also find the architecture +documentation useful ([ARCHITECTURE.md](ARCHITECTURE.md)). + +## License + +Cargo is primarily distributed under the terms of both the MIT license +and the Apache License (Version 2.0). + +See LICENSE-APACHE and LICENSE-MIT for details. + +### Third party software + +This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit (http://www.openssl.org/). + +In binary form, this product includes software that is licensed under the +terms of the GNU General Public License, version 2, with a linking exception, +which can be obtained from the [upstream repository][1]. + +See LICENSE-THIRD-PARTY for details. + +[1]: https://github.com/libgit2/libgit2 + diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 000000000..d901dbe18 --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,21 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + OTHER_TARGET: i686-pc-windows-msvc + MAKE_TARGETS: test-unit-x86_64-pc-windows-msvc + +install: + - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host x86_64-pc-windows-msvc --default-toolchain nightly + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + - rustup target add %OTHER_TARGET% + - rustc -V + - cargo -V + - git submodule update --init + +clone_depth: 1 + +build: false + +test_script: + - cargo test diff --git a/src/bin/cargo/cli.rs b/src/bin/cargo/cli.rs new file mode 100644 index 000000000..9fbd12cbc --- /dev/null +++ b/src/bin/cargo/cli.rs @@ -0,0 +1,214 @@ +extern crate clap; + +use clap::{AppSettings, Arg, ArgMatches}; + +use cargo::{self, CliResult, Config}; + +use super::list_commands; +use super::commands; +use command_prelude::*; + +pub fn main(config: &mut Config) -> CliResult { + let args = cli().get_matches_safe()?; + + if args.value_of("unstable-features") == Some("help") { + println!( + " +Available unstable (nightly-only) flags: + + -Z avoid-dev-deps -- Avoid installing dev-dependencies if possible + -Z minimal-versions -- Install minimal dependency versions instead of maximum + -Z no-index-update -- Do not update the registry, avoids a network request for benchmarking + -Z offline -- Offline mode that does not perform network requests + -Z unstable-options -- Allow the usage of unstable options such as --registry + +Run with 'cargo -Z [FLAG] [SUBCOMMAND]'" + ); + return Ok(()); + } + + let is_verbose = args.occurrences_of("verbose") > 0; + if args.is_present("version") { + let version = cargo::version(); + println!("{}", version); + if is_verbose { + println!( + "release: {}.{}.{}", + version.major, version.minor, version.patch + ); + if let Some(ref cfg) = version.cfg_info { + if let Some(ref ci) = cfg.commit_info { + println!("commit-hash: {}", ci.commit_hash); + println!("commit-date: {}", ci.commit_date); + } + } + } + return Ok(()); + } + + if let Some(ref code) = args.value_of("explain") { + let mut procss = config.rustc(None)?.process(); + procss.arg("--explain").arg(code).exec()?; + return Ok(()); + } + + if args.is_present("list") { + println!("Installed Commands:"); + for command in list_commands(config) { + let (command, path) = command; + if is_verbose { + match path { + Some(p) => println!(" {:<20} {}", command, p), + None => println!(" {:<20}", command), + } + } else { + println!(" {}", command); + } + } + return Ok(()); + } + + let args = 
expand_aliases(config, args)?;
+
+    execute_subcommand(config, args)
+}
+
+fn expand_aliases(
+    config: &mut Config,
+    args: ArgMatches<'static>,
+) -> Result<ArgMatches<'static>, CliError> {
+    if let (cmd, Some(args)) = args.subcommand() {
+        match (
+            commands::builtin_exec(cmd),
+            super::aliased_command(config, cmd)?,
+        ) {
+            (None, Some(mut alias)) => {
+                alias.extend(
+                    args.values_of("")
+                        .unwrap_or_default()
+                        .map(|s| s.to_string()),
+                );
+                let args = cli()
+                    .setting(AppSettings::NoBinaryName)
+                    .get_matches_from_safe(alias)?;
+                return expand_aliases(config, args);
+            }
+            (Some(_), Some(_)) => {
+                config.shell().warn(format!(
+                    "alias `{}` is ignored, because it is shadowed by a built-in command",
+                    cmd
+                ))?;
+            }
+            (_, None) => {}
+        }
+    };
+    Ok(args)
+}
+
+fn execute_subcommand(config: &mut Config, args: ArgMatches) -> CliResult {
+    let (cmd, subcommand_args) = match args.subcommand() {
+        (cmd, Some(args)) => (cmd, args),
+        _ => {
+            cli().print_help()?;
+            return Ok(());
+        }
+    };
+
+    let arg_target_dir = &subcommand_args.value_of_path("target-dir", config);
+
+    config.configure(
+        args.occurrences_of("verbose") as u32,
+        if args.is_present("quiet") {
+            Some(true)
+        } else {
+            None
+        },
+        &args.value_of("color").map(|s| s.to_string()),
+        args.is_present("frozen"),
+        args.is_present("locked"),
+        arg_target_dir,
+        &args.values_of_lossy("unstable-features")
+            .unwrap_or_default(),
+    )?;
+
+    if let Some(exec) = commands::builtin_exec(cmd) {
+        return exec(config, subcommand_args);
+    }
+
+    let mut ext_args: Vec<&str> = vec![cmd];
+    ext_args.extend(subcommand_args.values_of("").unwrap_or_default());
+    super::execute_external_subcommand(config, cmd, &ext_args)
+}
+
+fn cli() -> App {
+    let app = App::new("cargo")
+        .settings(&[
+            AppSettings::UnifiedHelpMessage,
+            AppSettings::DeriveDisplayOrder,
+            AppSettings::VersionlessSubcommands,
+            AppSettings::AllowExternalSubcommands,
+        ])
+        .about("")
+        .template(
+            "\
+Rust's package manager
+
+USAGE:
+    {usage}
+
+OPTIONS:
+{unified}
+
+Some common cargo commands are (see all commands with --list):
+    build       Compile the current project
+    check       Analyze the current project and report errors, but don't build object files
+    clean       Remove the target directory
+    doc         Build this project's and its dependencies' documentation
+    new         Create a new cargo project
+    init        Create a new cargo project in an existing directory
+    run         Build and execute src/main.rs
+    test        Run the tests
+    bench       Run the benchmarks
+    update      Update dependencies listed in Cargo.lock
+    search      Search registry for crates
+    publish     Package and upload this project to the registry
+    install     Install a Rust binary
+    uninstall   Uninstall a Rust binary
+
+See 'cargo help <command>' for more information on a specific command.\n",
+        )
+        .arg(opt("version", "Print version info and exit").short("V"))
+        .arg(opt("list", "List installed commands"))
+        .arg(opt("explain", "Run `rustc --explain CODE`").value_name("CODE"))
+        .arg(
+            opt(
+                "verbose",
+                "Use verbose output (-vv very verbose/build.rs output)",
+            ).short("v")
+                .multiple(true)
+                .global(true),
+        )
+        .arg(
+            opt("quiet", "No output printed to stdout")
+                .short("q")
+                .global(true),
+        )
+        .arg(
+            opt("color", "Coloring: auto, always, never")
+                .value_name("WHEN")
+                .global(true),
+        )
+        .arg(opt("frozen", "Require Cargo.lock and cache are up to date").global(true))
+        .arg(opt("locked", "Require Cargo.lock is up to date").global(true))
+        .arg(
+            Arg::with_name("unstable-features")
+                .help("Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details")
+                .short("Z")
.value_name("FLAG") + .multiple(true) + .number_of_values(1) + .global(true), + ) + .subcommands(commands::builtin()); + app +} diff --git a/src/bin/cargo/command_prelude.rs b/src/bin/cargo/command_prelude.rs new file mode 100644 index 000000000..3ef518e57 --- /dev/null +++ b/src/bin/cargo/command_prelude.rs @@ -0,0 +1,403 @@ +use std::path::PathBuf; +use std::fs; + +use clap::{self, SubCommand}; +use cargo::CargoResult; +use cargo::core::Workspace; +use cargo::core::compiler::{BuildConfig, MessageFormat}; +use cargo::ops::{CompileFilter, CompileOptions, NewOptions, Packages, VersionControl}; +use cargo::util::paths; +use cargo::util::important_paths::find_root_manifest_for_wd; + +pub use clap::{AppSettings, Arg, ArgMatches}; +pub use cargo::{CliError, CliResult, Config}; +pub use cargo::core::compiler::CompileMode; + +pub type App = clap::App<'static, 'static>; + +pub trait AppExt: Sized { + fn _arg(self, arg: Arg<'static, 'static>) -> Self; + + fn arg_package_spec( + self, + package: &'static str, + all: &'static str, + exclude: &'static str, + ) -> Self { + self.arg_package_spec_simple(package) + ._arg(opt("all", all)) + ._arg(multi_opt("exclude", "SPEC", exclude)) + } + + fn arg_package_spec_simple(self, package: &'static str) -> Self { + self._arg(multi_opt("package", "SPEC", package).short("p")) + } + + fn arg_package(self, package: &'static str) -> Self { + self._arg(opt("package", package).short("p").value_name("SPEC")) + } + + fn arg_jobs(self) -> Self { + self._arg( + opt("jobs", "Number of parallel jobs, defaults to # of CPUs") + .short("j") + .value_name("N"), + ) + } + + fn arg_targets_all( + self, + lib: &'static str, + bin: &'static str, + bins: &'static str, + example: &'static str, + examples: &'static str, + test: &'static str, + tests: &'static str, + bench: &'static str, + benches: &'static str, + all: &'static str, + ) -> Self { + self.arg_targets_lib_bin(lib, bin, bins) + ._arg(multi_opt("example", "NAME", example)) + ._arg(opt("examples", examples)) + ._arg(multi_opt("test", "NAME", test)) + ._arg(opt("tests", tests)) + ._arg(multi_opt("bench", "NAME", bench)) + ._arg(opt("benches", benches)) + ._arg(opt("all-targets", all)) + } + + fn arg_targets_lib_bin(self, lib: &'static str, bin: &'static str, bins: &'static str) -> Self { + self._arg(opt("lib", lib)) + ._arg(multi_opt("bin", "NAME", bin)) + ._arg(opt("bins", bins)) + } + + fn arg_targets_bins_examples( + self, + bin: &'static str, + bins: &'static str, + example: &'static str, + examples: &'static str, + ) -> Self { + self._arg(multi_opt("bin", "NAME", bin)) + ._arg(opt("bins", bins)) + ._arg(multi_opt("example", "NAME", example)) + ._arg(opt("examples", examples)) + } + + fn arg_targets_bin_example(self, bin: &'static str, example: &'static str) -> Self { + self._arg(multi_opt("bin", "NAME", bin)) + ._arg(multi_opt("example", "NAME", example)) + } + + fn arg_features(self) -> Self { + self._arg( + opt("features", "Space-separated list of features to activate").value_name("FEATURES"), + )._arg(opt("all-features", "Activate all available features")) + ._arg(opt( + "no-default-features", + "Do not activate the `default` feature", + )) + } + + fn arg_release(self, release: &'static str) -> Self { + self._arg(opt("release", release)) + } + + fn arg_doc(self, doc: &'static str) -> Self { + self._arg(opt("doc", doc)) + } + + fn arg_target_triple(self, target: &'static str) -> Self { + self._arg(opt("target", target).value_name("TRIPLE")) + } + + fn arg_target_dir(self) -> Self { + self._arg(opt("target-dir", 
"Directory for all generated artifacts").value_name("DIRECTORY")) + } + + fn arg_manifest_path(self) -> Self { + self._arg(opt("manifest-path", "Path to Cargo.toml").value_name("PATH")) + } + + fn arg_message_format(self) -> Self { + self._arg( + opt("message-format", "Error format") + .value_name("FMT") + .case_insensitive(true) + .possible_values(&["human", "json"]) + .default_value("human"), + ) + } + + fn arg_new_opts(self) -> Self { + self._arg( + opt( + "vcs", + "\ + Initialize a new repository for the given version \ + control system (git, hg, pijul, or fossil) or do not \ + initialize any version control at all (none), overriding \ + a global configuration.", + ).value_name("VCS") + .possible_values(&["git", "hg", "pijul", "fossil", "none"]), + )._arg(opt("bin", "Use a binary (application) template [default]")) + ._arg(opt("lib", "Use a library template")) + ._arg( + opt( + "name", + "Set the resulting package name, defaults to the directory name", + ).value_name("NAME"), + ) + } + + fn arg_index(self) -> Self { + self._arg(opt("index", "Registry index to upload the package to").value_name("INDEX")) + ._arg( + opt("host", "DEPRECATED, renamed to '--index'") + .value_name("HOST") + .hidden(true), + ) + } +} + +impl AppExt for App { + fn _arg(self, arg: Arg<'static, 'static>) -> Self { + self.arg(arg) + } +} + +pub fn opt(name: &'static str, help: &'static str) -> Arg<'static, 'static> { + Arg::with_name(name).long(name).help(help) +} + +pub fn multi_opt( + name: &'static str, + value_name: &'static str, + help: &'static str, +) -> Arg<'static, 'static> { + // Note that all `.multiple(true)` arguments in Cargo should specify + // `.number_of_values(1)` as well, so that `--foo val1 val2` is + // **not** parsed as `foo` with values ["val1", "val2"]. + // `number_of_values` should become the default in clap 3. + opt(name, help) + .value_name(value_name) + .multiple(true) + .number_of_values(1) +} + +pub fn subcommand(name: &'static str) -> App { + SubCommand::with_name(name).settings(&[ + AppSettings::UnifiedHelpMessage, + AppSettings::DeriveDisplayOrder, + AppSettings::DontCollapseArgsInUsage, + ]) +} + +pub trait ArgMatchesExt { + fn value_of_u32(&self, name: &str) -> CargoResult> { + let arg = match self._value_of(name) { + None => None, + Some(arg) => Some(arg.parse::().map_err(|_| { + clap::Error::value_validation_auto(format!("could not parse `{}` as a number", arg)) + })?), + }; + Ok(arg) + } + + /// Returns value of the `name` command-line argument as an absolute path + fn value_of_path(&self, name: &str, config: &Config) -> Option { + self._value_of(name).map(|path| config.cwd().join(path)) + } + + fn root_manifest(&self, config: &Config) -> CargoResult { + if let Some(path) = self.value_of_path("manifest-path", config) { + // In general, we try to avoid normalizing paths in Cargo, + // but in this particular case we need it to fix #3586. 
+            let path = paths::normalize_path(&path);
+            if !path.ends_with("Cargo.toml") {
+                bail!("the manifest-path must be a path to a Cargo.toml file")
+            }
+            if !fs::metadata(&path).is_ok() {
+                bail!(
+                    "manifest path `{}` does not exist",
+                    self._value_of("manifest-path").unwrap()
+                )
+            }
+            return Ok(path);
+        }
+        find_root_manifest_for_wd(config.cwd())
+    }
+
+    fn workspace<'a>(&self, config: &'a Config) -> CargoResult<Workspace<'a>> {
+        let root = self.root_manifest(config)?;
+        let mut ws = Workspace::new(&root, config)?;
+        if config.cli_unstable().avoid_dev_deps {
+            ws.set_require_optional_deps(false);
+        }
+        Ok(ws)
+    }
+
+    fn jobs(&self) -> CargoResult<Option<u32>> {
+        self.value_of_u32("jobs")
+    }
+
+    fn target(&self) -> Option<String> {
+        self._value_of("target").map(|s| s.to_string())
+    }
+
+    fn compile_options<'a>(
+        &self,
+        config: &'a Config,
+        mode: CompileMode,
+    ) -> CargoResult<CompileOptions<'a>> {
+        let spec = Packages::from_flags(
+            self._is_present("all"),
+            self._values_of("exclude"),
+            self._values_of("package"),
+        )?;
+
+        let message_format = match self._value_of("message-format") {
+            None => MessageFormat::Human,
+            Some(f) => {
+                if f.eq_ignore_ascii_case("json") {
+                    MessageFormat::Json
+                } else if f.eq_ignore_ascii_case("human") {
+                    MessageFormat::Human
+                } else {
+                    panic!("Impossible message format: {:?}", f)
+                }
+            }
+        };
+
+        let mut build_config = BuildConfig::new(config, self.jobs()?, &self.target(), mode)?;
+        build_config.message_format = message_format;
+        build_config.release = self._is_present("release");
+
+        let opts = CompileOptions {
+            config,
+            build_config,
+            features: self._values_of("features"),
+            all_features: self._is_present("all-features"),
+            no_default_features: self._is_present("no-default-features"),
+            spec,
+            filter: CompileFilter::new(
+                self._is_present("lib"),
+                self._values_of("bin"),
+                self._is_present("bins"),
+                self._values_of("test"),
+                self._is_present("tests"),
+                self._values_of("example"),
+                self._is_present("examples"),
+                self._values_of("bench"),
+                self._is_present("benches"),
+                self._is_present("all-targets"),
+            ),
+            target_rustdoc_args: None,
+            target_rustc_args: None,
+            export_dir: None,
+        };
+        Ok(opts)
+    }
+
+    fn compile_options_for_single_package<'a>(
+        &self,
+        config: &'a Config,
+        mode: CompileMode,
+    ) -> CargoResult<CompileOptions<'a>> {
+        let mut compile_opts = self.compile_options(config, mode)?;
+        compile_opts.spec = Packages::Packages(self._values_of("package"));
+        Ok(compile_opts)
+    }
+
+    fn new_options(&self, config: &Config) -> CargoResult<NewOptions> {
+        let vcs = self._value_of("vcs").map(|vcs| match vcs {
+            "git" => VersionControl::Git,
+            "hg" => VersionControl::Hg,
+            "pijul" => VersionControl::Pijul,
+            "fossil" => VersionControl::Fossil,
+            "none" => VersionControl::NoVcs,
+            vcs => panic!("Impossible vcs: {:?}", vcs),
+        });
+        NewOptions::new(
+            vcs,
+            self._is_present("bin"),
+            self._is_present("lib"),
+            self.value_of_path("path", config).unwrap(),
+            self._value_of("name").map(|s| s.to_string()),
+        )
+    }
+
+    fn registry(&self, config: &Config) -> CargoResult<Option<String>> {
+        match self._value_of("registry") {
+            Some(registry) => {
+                if !config.cli_unstable().unstable_options {
+                    return Err(format_err!(
+                        "registry option is an unstable feature and \
+                         requires -Zunstable-options to use."
+                    ).into());
+                }
+                Ok(Some(registry.to_string()))
+            }
+            None => Ok(None),
+        }
+    }
+
+    fn index(&self, config: &Config) -> CargoResult<Option<String>> {
+        // TODO: Deprecated
+        // remove once it has been decided --host can be removed
+        // We may instead want to repurpose the host flag, as
+        // mentioned in this issue
+        // https://github.com/rust-lang/cargo/issues/4208
+        let msg = "The flag '--host' is no longer valid.
+
+Previous versions of Cargo accepted this flag, but it is being
+deprecated. The flag is being renamed to '--index', since it takes
+the location of the registry index. Please use '--index' instead.
+
+This will soon become a hard error, so it's recommended to update to
+a fixed version of the tool in question, or to contact its upstream
+maintainer about this warning.";
+
+        let index = match self._value_of("host") {
+            Some(host) => {
+                config.shell().warn(&msg)?;
+                Some(host.to_string())
+            }
+            None => self._value_of("index").map(|s| s.to_string()),
+        };
+        Ok(index)
+    }
+
+    fn _value_of(&self, name: &str) -> Option<&str>;
+
+    fn _values_of(&self, name: &str) -> Vec<String>;
+
+    fn _is_present(&self, name: &str) -> bool;
+}
+
+impl<'a> ArgMatchesExt for ArgMatches<'a> {
+    fn _value_of(&self, name: &str) -> Option<&str> {
+        self.value_of(name)
+    }
+
+    fn _values_of(&self, name: &str) -> Vec<String> {
+        self.values_of(name)
+            .unwrap_or_default()
+            .map(|s| s.to_string())
+            .collect()
+    }
+
+    fn _is_present(&self, name: &str) -> bool {
+        self.is_present(name)
+    }
+}
+
+pub fn values(args: &ArgMatches, name: &str) -> Vec<String> {
+    args.values_of(name)
+        .unwrap_or_default()
+        .map(|s| s.to_string())
+        .collect()
+}
diff --git a/src/bin/cargo/commands/bench.rs b/src/bin/cargo/commands/bench.rs
new file mode 100644
index 000000000..ea12a4575
--- /dev/null
+++ b/src/bin/cargo/commands/bench.rs
@@ -0,0 +1,105 @@
+use command_prelude::*;
+
+use cargo::ops::{self, TestOptions};
+
+pub fn cli() -> App {
+    subcommand("bench")
+        .setting(AppSettings::TrailingVarArg)
+        .about("Execute all benchmarks of a local package")
+        .arg(
+            Arg::with_name("BENCHNAME")
+                .help("If specified, only run benches containing this string in their names"),
+        )
+        .arg(
+            Arg::with_name("args")
+                .help("Arguments for the bench binary")
+                .multiple(true)
+                .last(true),
+        )
+        .arg_targets_all(
+            "Benchmark only this package's library",
+            "Benchmark only the specified binary",
+            "Benchmark all binaries",
+            "Benchmark only the specified example",
+            "Benchmark all examples",
+            "Benchmark only the specified test target",
+            "Benchmark all tests",
+            "Benchmark only the specified bench target",
+            "Benchmark all benches",
+            "Benchmark all targets (default)",
+        )
+        .arg(opt("no-run", "Compile, but don't run benchmarks"))
+        .arg_package_spec(
+            "Package to run benchmarks for",
+            "Benchmark all packages in the workspace",
+            "Exclude packages from the benchmark",
+        )
+        .arg_jobs()
+        .arg_features()
+        .arg_target_triple("Build for the target triple")
+        .arg_target_dir()
+        .arg_manifest_path()
+        .arg_message_format()
+        .arg(opt(
+            "no-fail-fast",
+            "Run all benchmarks regardless of failure",
+        ))
+        .after_help(
+            "\
+The benchmark filtering argument `BENCHNAME` and all the arguments following the
+two dashes (`--`) are passed to the benchmark binaries and thus to libtest
+(rustc's built-in unit-test and micro-benchmarking framework). If you're
+passing arguments to both Cargo and the binary, the ones after `--` go to the
+binary, the ones before go to Cargo. For details about libtest's arguments see
+the output of `cargo bench -- --help`.
+ +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be benchmarked. If it is not given, then +the current package is benchmarked. For more information on SPEC and its format, +see the `cargo help pkgid` command. + +All packages in the workspace are benchmarked if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +The --jobs argument affects the building of the benchmark executable but does +not affect how many jobs are used when running the benchmarks. + +Compilation can be customized with the `bench` profile in the manifest. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let mut compile_opts = args.compile_options(config, CompileMode::Bench)?; + compile_opts.build_config.release = true; + + let ops = TestOptions { + no_run: args.is_present("no-run"), + no_fail_fast: args.is_present("no-fail-fast"), + only_doc: false, + compile_opts, + }; + + let mut bench_args = vec![]; + bench_args.extend( + args.value_of("BENCHNAME") + .into_iter() + .map(|s| s.to_string()), + ); + bench_args.extend( + args.values_of("args") + .unwrap_or_default() + .map(|s| s.to_string()), + ); + + let err = ops::run_benches(&ws, &ops, &bench_args)?; + match err { + None => Ok(()), + Some(err) => Err(match err.exit.as_ref().and_then(|e| e.code()) { + Some(i) => CliError::new(format_err!("bench failed"), i), + None => CliError::new(err.into(), 101), + }), + } +} diff --git a/src/bin/cargo/commands/build.rs b/src/bin/cargo/commands/build.rs new file mode 100644 index 000000000..5dbc3850a --- /dev/null +++ b/src/bin/cargo/commands/build.rs @@ -0,0 +1,63 @@ +use command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("build") + .alias("b") + .about("Compile a local package and all of its dependencies") + .arg_package_spec( + "Package to build", + "Build all packages in the workspace", + "Exclude packages from the build", + ) + .arg_jobs() + .arg_targets_all( + "Build only this package's library", + "Build only the specified binary", + "Build all binaries", + "Build only the specified example", + "Build all examples", + "Build only the specified test target", + "Build all tests", + "Build only the specified bench target", + "Build all benches", + "Build all targets (lib and bin targets by default)", + ) + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg(opt("out-dir", "Copy final artifacts to this directory").value_name("PATH")) + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be built. If it is not given, then the +current package is built. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are built if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +Compilation can be configured via the use of profiles which are configured in +the manifest. The default profile for this command is `dev`, but passing +the --release flag will use the `release` profile instead. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let mut compile_opts = args.compile_options(config, CompileMode::Build)?; + compile_opts.export_dir = args.value_of_path("out-dir", config); + if compile_opts.export_dir.is_some() && !config.cli_unstable().unstable_options { + Err(format_err!( + "`--out-dir` flag is unstable, pass `-Z unstable-options` to enable it" + ))?; + }; + ops::compile(&ws, &compile_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/check.rs b/src/bin/cargo/commands/check.rs new file mode 100644 index 000000000..72cebfda8 --- /dev/null +++ b/src/bin/cargo/commands/check.rs @@ -0,0 +1,72 @@ +use command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("check") + .about("Check a local package and all of its dependencies for errors") + .arg_package_spec( + "Package(s) to check", + "Check all packages in the workspace", + "Exclude packages from the check", + ) + .arg_jobs() + .arg_targets_all( + "Check only this package's library", + "Check only the specified binary", + "Check all binaries", + "Check only the specified example", + "Check all examples", + "Check only the specified test target", + "Check all tests", + "Check only the specified bench target", + "Check all benches", + "Check all targets (lib and bin targets by default)", + ) + .arg_release("Check artifacts in release mode, with optimizations") + .arg(opt("profile", "Profile to build the selected target for").value_name("PROFILE")) + .arg_features() + .arg_target_triple("Check for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be built. If it is not given, then the +current package is built. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are checked if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +Compilation can be configured via the use of profiles which are configured in +the manifest. The default profile for this command is `dev`, but passing +the --release flag will use the `release` profile instead. + +The `--profile test` flag can be used to check unit tests with the +`#[cfg(test)]` attribute. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let test = match args.value_of("profile") { + Some("test") => true, + None => false, + Some(profile) => { + let err = format_err!( + "unknown profile: `{}`, only `test` is \ + currently supported", + profile + ); + return Err(CliError::new(err, 101)); + } + }; + let mode = CompileMode::Check { test }; + let compile_opts = args.compile_options(config, mode)?; + ops::compile(&ws, &compile_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/clean.rs b/src/bin/cargo/commands/clean.rs new file mode 100644 index 000000000..a7606a644 --- /dev/null +++ b/src/bin/cargo/commands/clean.rs @@ -0,0 +1,35 @@ +use command_prelude::*; + +use cargo::ops::{self, CleanOptions}; + +pub fn cli() -> App { + subcommand("clean") + .about("Remove artifacts that cargo has generated in the past") + .arg_package_spec_simple("Package to clean artifacts for") + .arg_manifest_path() + .arg_target_triple("Target triple to clean output for (default all)") + .arg_target_dir() + .arg_release("Whether or not to clean release artifacts") + .arg_doc("Whether or not to clean just the documentation directory") + .after_help( + "\ +If the --package argument is given, then SPEC is a package id specification +which indicates which package's artifacts should be cleaned out. If it is not +given, then all packages' artifacts are removed. For more information on SPEC +and its format, see the `cargo help pkgid` command. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let opts = CleanOptions { + config, + spec: values(args, "package"), + target: args.target(), + release: args.is_present("release"), + doc: args.is_present("doc"), + }; + ops::clean(&ws, &opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/doc.rs b/src/bin/cargo/commands/doc.rs new file mode 100644 index 000000000..7bb19fd5e --- /dev/null +++ b/src/bin/cargo/commands/doc.rs @@ -0,0 +1,59 @@ +use command_prelude::*; + +use cargo::ops::{self, DocOptions}; + +pub fn cli() -> App { + subcommand("doc") + .about("Build a package's documentation") + .arg(opt( + "open", + "Opens the docs in a browser after the operation", + )) + .arg_package_spec( + "Package to document", + "Document all packages in the workspace", + "Exclude packages from the build", + ) + .arg(opt("no-deps", "Don't build documentation for dependencies")) + .arg_jobs() + .arg_targets_lib_bin( + "Document only this package's library", + "Document only the specified binary", + "Document all binaries", + ) + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +By default the documentation for the local package and all dependencies is +built. The output is all placed in `target/doc` in rustdoc's usual format. + +All packages in the workspace are documented if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be documented. If it is not given, then the +current package is documented. For more information on SPEC and its format, see +the `cargo help pkgid` command. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let mode = CompileMode::Doc { + deps: !args.is_present("no-deps"), + }; + let compile_opts = args.compile_options(config, mode)?; + let doc_opts = DocOptions { + open_result: args.is_present("open"), + compile_opts, + }; + ops::doc(&ws, &doc_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/fetch.rs b/src/bin/cargo/commands/fetch.rs new file mode 100644 index 000000000..f69ed256b --- /dev/null +++ b/src/bin/cargo/commands/fetch.rs @@ -0,0 +1,34 @@ +use command_prelude::*; + +use cargo::ops; +use cargo::ops::FetchOptions; + +pub fn cli() -> App { + subcommand("fetch") + .about("Fetch dependencies of a package from the network") + .arg_manifest_path() + .arg_target_triple("Fetch dependencies for the target triple") + .after_help( + "\ +If a lockfile is available, this command will ensure that all of the git +dependencies and/or registries dependencies are downloaded and locally +available. The network is never touched after a `cargo fetch` unless +the lockfile changes. + +If the lockfile is not available, then this is the equivalent of +`cargo generate-lockfile`. A lockfile is generated and dependencies are also +all updated. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + + let opts = FetchOptions { + config, + target: args.target(), + }; + ops::fetch(&ws, &opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/generate_lockfile.rs b/src/bin/cargo/commands/generate_lockfile.rs new file mode 100644 index 000000000..f730872be --- /dev/null +++ b/src/bin/cargo/commands/generate_lockfile.rs @@ -0,0 +1,27 @@ +use command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("generate-lockfile") + .about("Generate the lockfile for a project") + .arg_manifest_path() + .after_help( + "\ +If a lockfile is available, this command will ensure that all of the git +dependencies and/or registries dependencies are downloaded and locally +available. The network is never touched after a `cargo fetch` unless +the lockfile changes. + +If the lockfile is not available, then this is the equivalent of +`cargo generate-lockfile`. A lockfile is generated and dependencies are also +all updated. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + ops::generate_lockfile(&ws)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/git_checkout.rs b/src/bin/cargo/commands/git_checkout.rs new file mode 100644 index 000000000..a9401f105 --- /dev/null +++ b/src/bin/cargo/commands/git_checkout.rs @@ -0,0 +1,36 @@ +use command_prelude::*; + +use cargo::core::{GitReference, Source, SourceId}; +use cargo::sources::GitSource; +use cargo::util::ToUrl; + +pub fn cli() -> App { + subcommand("git-checkout") + .about("Checkout a copy of a Git repository") + .arg( + Arg::with_name("url") + .long("url") + .value_name("URL") + .required(true), + ) + .arg( + Arg::with_name("reference") + .long("reference") + .value_name("REF") + .required(true), + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let url = args.value_of("url").unwrap().to_url()?; + let reference = args.value_of("reference").unwrap(); + + let reference = GitReference::Branch(reference.to_string()); + let source_id = SourceId::for_git(&url, reference)?; + + let mut source = GitSource::new(&source_id, config)?; + + source.update()?; + + Ok(()) +} diff --git a/src/bin/cargo/commands/init.rs b/src/bin/cargo/commands/init.rs new file mode 100644 index 000000000..c32dead4d --- /dev/null +++ b/src/bin/cargo/commands/init.rs @@ -0,0 +1,19 @@ +use command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("init") + .about("Create a new cargo package in an existing directory") + .arg(Arg::with_name("path").default_value(".")) + .arg_new_opts() +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let opts = args.new_options(config)?; + ops::init(&opts, config)?; + config + .shell() + .status("Created", format!("{} project", opts.kind))?; + Ok(()) +} diff --git a/src/bin/cargo/commands/install.rs b/src/bin/cargo/commands/install.rs new file mode 100644 index 000000000..f0c65515d --- /dev/null +++ b/src/bin/cargo/commands/install.rs @@ -0,0 +1,122 @@ +use command_prelude::*; + +use cargo::core::{GitReference, SourceId}; +use cargo::ops; +use cargo::util::ToUrl; + +pub fn cli() -> App { + subcommand("install") + .about("Install a Rust binary") + .arg(Arg::with_name("crate").empty_values(false).multiple(true)) + .arg( + opt("version", "Specify a version to install from crates.io") + .alias("vers") + .value_name("VERSION"), + ) + .arg(opt("git", "Git URL to install the specified crate from").value_name("URL")) + .arg(opt("branch", "Branch to use when installing from git").value_name("BRANCH")) + .arg(opt("tag", "Tag to use when installing from git").value_name("TAG")) + .arg(opt("rev", "Specific commit to use when installing from git").value_name("SHA")) + .arg(opt("path", "Filesystem path to local crate to install").value_name("PATH")) + .arg(opt( + "list", + "list all installed packages and their versions", + )) + .arg_jobs() + .arg(opt("force", "Force overwriting existing crates or binaries").short("f")) + .arg_features() + .arg(opt("debug", "Build in debug mode instead of release mode")) + .arg_targets_bins_examples( + "Install only the specified binary", + "Install all binaries", + "Install only the specified example", + "Install all examples", + ) + .arg(opt("root", "Directory to install packages into").value_name("DIR")) + .after_help( + "\ +This command manages Cargo's local set of installed binary crates. 
Only packages
+which have [[bin]] targets can be installed, and all binaries are installed into
+the installation root's `bin` folder. The installation root is determined, in
+order of precedence, by `--root`, `$CARGO_INSTALL_ROOT`, the `install.root`
+configuration key, and finally the home directory (which is either
+`$CARGO_HOME` if set or `$HOME/.cargo` by default).
+
+There are multiple sources from which a crate can be installed. The default
+location is crates.io but the `--git` and `--path` flags can change this source.
+If the source contains more than one package (such as crates.io or a git
+repository with multiple crates) the `<crate>` argument is required to indicate
+which crate should be installed.
+
+Crates from crates.io can optionally specify the version they wish to install
+via the `--vers` flag, and similarly packages from git repositories can
+optionally specify the branch, tag, or revision that should be installed. If a
+crate has multiple binaries, the `--bin` argument can selectively install only
+one of them, and if you'd rather install examples the `--example` argument can
+be used as well.
+
+By default cargo will refuse to overwrite existing binaries. The `--force` flag
+enables overwriting existing binaries. Thus you can reinstall a crate with
+`cargo install --force <crate>`.
+
+As a special convenience, omitting the <crate> specification entirely will
+install the crate in the current directory. That is, `install` is equivalent to
+the more explicit `install --path .`.
+
+If the source is crates.io or `--git` then by default the crate will be built
+in a temporary target directory. To avoid this, the target directory can be
+specified by setting the `CARGO_TARGET_DIR` environment variable to a relative
+path. In particular, this can be useful for caching build artifacts on
+continuous integration systems.",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let mut compile_opts = args.compile_options(config, CompileMode::Build)?;
+    compile_opts.build_config.release = !args.is_present("debug");
+
+    let krates = args.values_of("crate")
+        .unwrap_or_default()
+        .collect::<Vec<_>>();
+
+    let mut from_cwd = false;
+
+    let source = if let Some(url) = args.value_of("git") {
+        let url = url.to_url()?;
+        let gitref = if let Some(branch) = args.value_of("branch") {
+            GitReference::Branch(branch.to_string())
+        } else if let Some(tag) = args.value_of("tag") {
+            GitReference::Tag(tag.to_string())
+        } else if let Some(rev) = args.value_of("rev") {
+            GitReference::Rev(rev.to_string())
+        } else {
+            GitReference::Branch("master".to_string())
+        };
+        SourceId::for_git(&url, gitref)?
+    } else if let Some(path) = args.value_of_path("path", config) {
+        SourceId::for_path(&path)?
+    } else if krates.is_empty() {
+        from_cwd = true;
+        SourceId::for_path(config.cwd())?
+    } else {
+        SourceId::crates_io(config)?
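+        // (Source selection above, in order: --git with an optional
+        // --branch/--tag/--rev, then --path, then the current directory
+        // when no <crate> is named, and finally crates.io.)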
+ }; + + let version = args.value_of("version"); + let root = args.value_of("root"); + + if args.is_present("list") { + ops::install_list(root, config)?; + } else { + ops::install( + root, + krates, + &source, + from_cwd, + version, + &compile_opts, + args.is_present("force"), + )?; + } + Ok(()) +} diff --git a/src/bin/cargo/commands/locate_project.rs b/src/bin/cargo/commands/locate_project.rs new file mode 100644 index 000000000..2e20ccfb3 --- /dev/null +++ b/src/bin/cargo/commands/locate_project.rs @@ -0,0 +1,33 @@ +use command_prelude::*; + +use cargo::print_json; + +pub fn cli() -> App { + subcommand("locate-project") + .about("Print a JSON representation of a Cargo.toml file's location") + .arg_manifest_path() +} + +#[derive(Serialize)] +pub struct ProjectLocation { + root: String, +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let root = args.root_manifest(config)?; + + let root = root.to_str() + .ok_or_else(|| { + format_err!( + "your project path contains characters \ + not representable in Unicode" + ) + }) + .map_err(|e| CliError::new(e, 1))? + .to_string(); + + let location = ProjectLocation { root }; + + print_json(&location); + Ok(()) +} diff --git a/src/bin/cargo/commands/login.rs b/src/bin/cargo/commands/login.rs new file mode 100644 index 000000000..199951048 --- /dev/null +++ b/src/bin/cargo/commands/login.rs @@ -0,0 +1,58 @@ +use command_prelude::*; + +use std::io::{self, BufRead}; + +use cargo::core::{Source, SourceId}; +use cargo::sources::RegistrySource; +use cargo::util::{CargoError, CargoResultExt}; +use cargo::ops; + +pub fn cli() -> App { + subcommand("login") + .about( + "Save an api token from the registry locally. \ + If token is not specified, it will be read from stdin.", + ) + .arg(Arg::with_name("token")) + .arg(opt("host", "Host to set the token for").value_name("HOST")) + .arg(opt("registry", "Registry to use").value_name("REGISTRY")) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let registry = args.registry(config)?; + + let token = match args.value_of("token") { + Some(token) => token.to_string(), + None => { + let host = match registry { + Some(ref _registry) => { + return Err(format_err!( + "token must be provided when \ + --registry is provided." 
+                ).into());
+            }
+            None => {
+                let src = SourceId::crates_io(config)?;
+                let mut src = RegistrySource::remote(&src, config);
+                src.update()?;
+                let config = src.config()?.unwrap();
+                args.value_of("host")
+                    .map(|s| s.to_string())
+                    .unwrap_or(config.api.unwrap())
+            }
+        };
+        println!("please visit {}me and paste the API Token below", host);
+        let mut line = String::new();
+        let input = io::stdin();
+        input
+            .lock()
+            .read_line(&mut line)
+            .chain_err(|| "failed to read stdin")
+            .map_err(CargoError::from)?;
+        line.trim().to_string()
+    }
+};
+
+    ops::registry_login(config, token, registry)?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/metadata.rs b/src/bin/cargo/commands/metadata.rs
new file mode 100644
index 000000000..b701acd61
--- /dev/null
+++ b/src/bin/cargo/commands/metadata.rs
@@ -0,0 +1,53 @@
+use command_prelude::*;
+
+use cargo::ops::{self, OutputMetadataOptions};
+use cargo::print_json;
+
+pub fn cli() -> App {
+    subcommand("metadata")
+        .about(
+            "Output the resolved dependencies of a project, \
+             the concrete used versions including overrides, \
+             in machine-readable format",
+        )
+        .arg_features()
+        .arg(opt(
+            "no-deps",
+            "Output information only about the root package \
+             and don't fetch dependencies",
+        ))
+        .arg_manifest_path()
+        .arg(
+            opt("format-version", "Format version")
+                .value_name("VERSION")
+                .possible_value("1"),
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let ws = args.workspace(config)?;
+
+    let version = match args.value_of("format-version") {
+        None => {
+            config.shell().warn(
+                "\
+                 please specify `--format-version` flag explicitly \
+                 to avoid compatibility problems",
+            )?;
+            1
+        }
+        Some(version) => version.parse().unwrap(),
+    };
+
+    let options = OutputMetadataOptions {
+        features: values(args, "features"),
+        all_features: args.is_present("all-features"),
+        no_default_features: args.is_present("no-default-features"),
+        no_deps: args.is_present("no-deps"),
+        version,
+    };
+
+    let result = ops::output_metadata(&ws, &options)?;
+    print_json(&result);
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/mod.rs b/src/bin/cargo/commands/mod.rs
new file mode 100644
index 000000000..fc829a855
--- /dev/null
+++ b/src/bin/cargo/commands/mod.rs
@@ -0,0 +1,101 @@
+use command_prelude::*;
+
+pub fn builtin() -> Vec<App> {
+    vec![
+        bench::cli(),
+        build::cli(),
+        check::cli(),
+        clean::cli(),
+        doc::cli(),
+        fetch::cli(),
+        generate_lockfile::cli(),
+        git_checkout::cli(),
+        init::cli(),
+        install::cli(),
+        locate_project::cli(),
+        login::cli(),
+        metadata::cli(),
+        new::cli(),
+        owner::cli(),
+        package::cli(),
+        pkgid::cli(),
+        publish::cli(),
+        read_manifest::cli(),
+        run::cli(),
+        rustc::cli(),
+        rustdoc::cli(),
+        search::cli(),
+        test::cli(),
+        uninstall::cli(),
+        update::cli(),
+        verify_project::cli(),
+        version::cli(),
+        yank::cli(),
+    ]
+}
+
+pub fn builtin_exec(cmd: &str) -> Option<fn(&mut Config, &ArgMatches) -> CliResult> {
+    let f = match cmd {
+        "bench" => bench::exec,
+        "build" => build::exec,
+        "check" => check::exec,
+        "clean" => clean::exec,
+        "doc" => doc::exec,
+        "fetch" => fetch::exec,
+        "generate-lockfile" => generate_lockfile::exec,
+        "git-checkout" => git_checkout::exec,
+        "init" => init::exec,
+        "install" => install::exec,
+        "locate-project" => locate_project::exec,
+        "login" => login::exec,
+        "metadata" => metadata::exec,
+        "new" => new::exec,
+        "owner" => owner::exec,
+        "package" => package::exec,
+        "pkgid" => pkgid::exec,
+        "publish" => publish::exec,
+        "read-manifest" => read_manifest::exec,
+        "run" => run::exec,
+        "rustc" => rustc::exec,
"rustdoc" => rustdoc::exec, + "search" => search::exec, + "test" => test::exec, + "uninstall" => uninstall::exec, + "update" => update::exec, + "verify-project" => verify_project::exec, + "version" => version::exec, + "yank" => yank::exec, + _ => return None, + }; + Some(f) +} + +pub mod bench; +pub mod build; +pub mod check; +pub mod clean; +pub mod doc; +pub mod fetch; +pub mod generate_lockfile; +pub mod git_checkout; +pub mod init; +pub mod install; +pub mod locate_project; +pub mod login; +pub mod metadata; +pub mod new; +pub mod owner; +pub mod package; +pub mod pkgid; +pub mod publish; +pub mod read_manifest; +pub mod run; +pub mod rustc; +pub mod rustdoc; +pub mod search; +pub mod test; +pub mod uninstall; +pub mod update; +pub mod verify_project; +pub mod version; +pub mod yank; diff --git a/src/bin/cargo/commands/new.rs b/src/bin/cargo/commands/new.rs new file mode 100644 index 000000000..ff8472209 --- /dev/null +++ b/src/bin/cargo/commands/new.rs @@ -0,0 +1,26 @@ +use command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("new") + .about("Create a new cargo package at ") + .arg(Arg::with_name("path").required(true)) + .arg_new_opts() +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let opts = args.new_options(config)?; + + ops::new(&opts, config)?; + let path = args.value_of("path").unwrap(); + let project_name = if let Some(name) = args.value_of("name") { + name + } else { + path + }; + config + .shell() + .status("Created", format!("{} `{}` project", opts.kind, project_name))?; + Ok(()) +} diff --git a/src/bin/cargo/commands/owner.rs b/src/bin/cargo/commands/owner.rs new file mode 100644 index 000000000..f20be31b1 --- /dev/null +++ b/src/bin/cargo/commands/owner.rs @@ -0,0 +1,49 @@ +use command_prelude::*; + +use cargo::ops::{self, OwnersOptions}; + +pub fn cli() -> App { + subcommand("owner") + .about("Manage the owners of a crate on the registry") + .arg(Arg::with_name("crate")) + .arg(multi_opt("add", "LOGIN", "Name of a user or team to add as an owner").short("a")) + .arg( + multi_opt( + "remove", + "LOGIN", + "Name of a user or team to remove as an owner", + ).short("r"), + ) + .arg(opt("list", "List owners of a crate").short("l")) + .arg(opt("index", "Registry index to modify owners for").value_name("INDEX")) + .arg(opt("token", "API token to use when authenticating").value_name("TOKEN")) + .arg(opt("registry", "Registry to use").value_name("REGISTRY")) + .after_help( + "\ + This command will modify the owners for a package + on the specified registry(or + default).Note that owners of a package can upload new versions, yank old + versions.Explicitly named owners can also modify the set of owners, so take + caution! 
+
+See http://doc.crates.io/crates-io.html#cargo-owner for detailed documentation
+and troubleshooting.",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let registry = args.registry(config)?;
+    let opts = OwnersOptions {
+        krate: args.value_of("crate").map(|s| s.to_string()),
+        token: args.value_of("token").map(|s| s.to_string()),
+        index: args.value_of("index").map(|s| s.to_string()),
+        to_add: args.values_of("add")
+            .map(|xs| xs.map(|s| s.to_string()).collect()),
+        to_remove: args.values_of("remove")
+            .map(|xs| xs.map(|s| s.to_string()).collect()),
+        list: args.is_present("list"),
+        registry,
+    };
+    ops::modify_owners(config, &opts)?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/package.rs b/src/bin/cargo/commands/package.rs
new file mode 100644
index 000000000..f5e9d9184
--- /dev/null
+++ b/src/bin/cargo/commands/package.rs
@@ -0,0 +1,48 @@
+use command_prelude::*;
+
+use cargo::ops::{self, PackageOpts};
+
+pub fn cli() -> App {
+    subcommand("package")
+        .about("Assemble the local package into a distributable tarball")
+        .arg(
+            opt(
+                "list",
+                "Print files included in a package without making one",
+            ).short("l"),
+        )
+        .arg(opt(
+            "no-verify",
+            "Don't verify the contents by building them",
+        ))
+        .arg(opt(
+            "no-metadata",
+            "Ignore warnings about a lack of human-usable metadata",
+        ))
+        .arg(opt(
+            "allow-dirty",
+            "Allow dirty working directories to be packaged",
+        ))
+        .arg_target_triple("Build for the target triple")
+        .arg_target_dir()
+        .arg_manifest_path()
+        .arg_jobs()
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let ws = args.workspace(config)?;
+    ops::package(
+        &ws,
+        &PackageOpts {
+            config,
+            verify: !args.is_present("no-verify"),
+            list: args.is_present("list"),
+            check_metadata: !args.is_present("no-metadata"),
+            allow_dirty: args.is_present("allow-dirty"),
+            target: args.target(),
+            jobs: args.jobs()?,
+            registry: None,
+        },
+    )?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/pkgid.rs b/src/bin/cargo/commands/pkgid.rs
new file mode 100644
index 000000000..7010092d6
--- /dev/null
+++ b/src/bin/cargo/commands/pkgid.rs
@@ -0,0 +1,41 @@
+use command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("pkgid")
+        .about("Print a fully qualified package specification")
+        .arg(Arg::with_name("spec"))
+        .arg_package("Argument to get the package id specifier for")
+        .arg_manifest_path()
+        .after_help(
+            "\
+Given a <spec> argument, print out the fully qualified package id specifier.
+This command will generate an error if <spec> is ambiguous as to which package
+it refers to in the dependency graph. If no <spec> is given, then the pkgid for
+the local package is printed.
+
+This command requires that a lockfile is available and dependencies have been
+fetched.
+
+Example Package IDs
+
+  pkgid                      | name | version | url
+  ---------------------------|------|---------|----------------------
+  foo                        | foo  | *       | *
+  foo:1.2.3                  | foo  | 1.2.3   | *
+  crates.io/foo              | foo  | *       | *://crates.io/foo
+  crates.io/foo#1.2.3        | foo  | 1.2.3   | *://crates.io/foo
+  crates.io/bar#foo:1.2.3    | foo  | 1.2.3   | *://crates.io/bar
+  http://crates.io/foo#1.2.3 | foo  | 1.2.3   | http://crates.io/foo
+",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let ws = args.workspace(config)?;
+    let spec = args.value_of("spec").or(args.value_of("package"));
+    let spec = ops::pkgid(&ws, spec)?;
+    println!("{}", spec);
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/publish.rs b/src/bin/cargo/commands/publish.rs
new file mode 100644
index 000000000..b50d3619c
--- /dev/null
+++ b/src/bin/cargo/commands/publish.rs
@@ -0,0 +1,46 @@
+use command_prelude::*;
+
+use cargo::ops::{self, PublishOpts};
+
+pub fn cli() -> App {
+    subcommand("publish")
+        .about("Upload a package to the registry")
+        .arg_index()
+        .arg(opt("token", "Token to use when uploading").value_name("TOKEN"))
+        .arg(opt(
+            "no-verify",
+            "Don't verify the contents by building them",
+        ))
+        .arg(opt(
+            "allow-dirty",
+            "Allow dirty working directories to be packaged",
+        ))
+        .arg_target_triple("Build for the target triple")
+        .arg_target_dir()
+        .arg_manifest_path()
+        .arg_jobs()
+        .arg(opt("dry-run", "Perform all checks without uploading"))
+        .arg(opt("registry", "Registry to publish to").value_name("REGISTRY"))
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let registry = args.registry(config)?;
+    let ws = args.workspace(config)?;
+    let index = args.index(config)?;
+
+    ops::publish(
+        &ws,
+        &PublishOpts {
+            config,
+            token: args.value_of("token").map(|s| s.to_string()),
+            index,
+            verify: !args.is_present("no-verify"),
+            allow_dirty: args.is_present("allow-dirty"),
+            target: args.target(),
+            jobs: args.jobs()?,
+            dry_run: args.is_present("dry-run"),
+            registry,
+        },
+    )?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/read_manifest.rs b/src/bin/cargo/commands/read_manifest.rs
new file mode 100644
index 000000000..1e54c79e8
--- /dev/null
+++ b/src/bin/cargo/commands/read_manifest.rs
@@ -0,0 +1,18 @@
+use command_prelude::*;
+
+use cargo::print_json;
+
+pub fn cli() -> App {
+    subcommand("read-manifest")
+        .about(
+            "Deprecated, use `cargo metadata --no-deps` instead.
+Print a JSON representation of a Cargo.toml manifest.",
+        )
+        .arg_manifest_path()
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let ws = args.workspace(config)?;
+    print_json(&ws.current()?);
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/run.rs b/src/bin/cargo/commands/run.rs
new file mode 100644
index 000000000..683b47b88
--- /dev/null
+++ b/src/bin/cargo/commands/run.rs
@@ -0,0 +1,69 @@
+use command_prelude::*;
+
+use cargo::core::Verbosity;
+use cargo::ops::{self, CompileFilter};
+
+pub fn cli() -> App {
+    subcommand("run")
+        .alias("r")
+        .setting(AppSettings::TrailingVarArg)
+        .about("Run the main binary of the local package (src/main.rs)")
+        .arg(Arg::with_name("args").multiple(true))
+        .arg_targets_bin_example(
+            "Name of the bin target to run",
+            "Name of the example target to run",
+        )
+        .arg_package("Package with the target to run")
+        .arg_jobs()
+        .arg_release("Build artifacts in release mode, with optimizations")
+        .arg_features()
+        .arg_target_triple("Build for the target triple")
+        .arg_target_dir()
+        .arg_manifest_path()
+        .arg_message_format()
+        .after_help(
+            "\
+If neither `--bin` nor `--example` are given, then if the project only has one
+bin target it will be run. Otherwise `--bin` specifies the bin target to run,
+and `--example` specifies the example target to run. At most one of `--bin` or
+`--example` can be provided.
+
+All the arguments following the two dashes (`--`) are passed to the binary to
+run. If you're passing arguments to both Cargo and the binary, the ones after
+`--` go to the binary, the ones before go to Cargo.
+",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let ws = args.workspace(config)?;
+
+    let mut compile_opts = args.compile_options_for_single_package(config, CompileMode::Build)?;
+    if !args.is_present("example") && !args.is_present("bin") {
+        compile_opts.filter = CompileFilter::Default {
+            required_features_filterable: false,
+        };
+    }
+    match ops::run(&ws, &compile_opts, &values(args, "args"))? {
+        None => Ok(()),
+        Some(err) => {
+            // If we never actually spawned the process then that sounds pretty
+            // bad and we always want to forward that up.
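+            // To make the mapping below concrete (an illustrative sketch, not
+            // exhaustive): a binary that itself calls `std::process::exit(3)`
+            // makes `cargo run` exit with status 3, while a binary that could
+            // not be spawned at all is reported with Cargo's generic exit
+            // code 101 together with the underlying error.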
+            let exit = match err.exit {
+                Some(exit) => exit,
+                None => return Err(CliError::new(err.into(), 101)),
+            };
+
+            // If `-q` was passed then we suppress extra error information about
+            // a failed process; we assume the process itself printed out enough
+            // information about why it failed, so we don't do so as well.
+            let exit_code = exit.code().unwrap_or(101);
+            let is_quiet = config.shell().verbosity() == Verbosity::Quiet;
+            Err(if is_quiet {
+                CliError::code(exit_code)
+            } else {
+                CliError::new(err.into(), exit_code)
+            })
+        }
+    }
+}
diff --git a/src/bin/cargo/commands/rustc.rs b/src/bin/cargo/commands/rustc.rs
new file mode 100644
index 000000000..a5ca4deff
--- /dev/null
+++ b/src/bin/cargo/commands/rustc.rs
@@ -0,0 +1,74 @@
+use command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("rustc")
+        .setting(AppSettings::TrailingVarArg)
+        .about("Compile a package and all of its dependencies")
+        .arg(Arg::with_name("args").multiple(true))
+        .arg_package("Package to build")
+        .arg_jobs()
+        .arg_targets_all(
+            "Build only this package's library",
+            "Build only the specified binary",
+            "Build all binaries",
+            "Build only the specified example",
+            "Build all examples",
+            "Build only the specified test target",
+            "Build all tests",
+            "Build only the specified bench target",
+            "Build all benches",
+            "Build all targets (lib and bin targets by default)",
+        )
+        .arg_release("Build artifacts in release mode, with optimizations")
+        .arg(opt("profile", "Profile to build the selected target for").value_name("PROFILE"))
+        .arg_features()
+        .arg_target_triple("Target triple which compiles will be for")
+        .arg_target_dir()
+        .arg_manifest_path()
+        .arg_message_format()
+        .after_help(
+            "\
+The specified target for the current package (or package specified by SPEC if
+provided) will be compiled along with all of its dependencies. The specified
+<args>... will all be passed to the final compiler invocation, not any of the
+dependencies. Note that the compiler will still unconditionally receive
+arguments such as -L, --extern, and --crate-type, and the specified <args>...
+will simply be added to the compiler invocation.
+
+This command requires that only one target is being compiled. If more than one
+target is available for the current package the filters of --lib, --bin, etc,
+must be used to select which target is compiled. To pass flags to all compiler
+processes spawned by Cargo, use the $RUSTFLAGS environment variable or the
+`build.rustflags` configuration option.
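+
+As an example of passing extra flags to the final invocation only, the
+following asks rustc to also emit assembly for the library target (any other
+rustc flags could be substituted here):
+
+    cargo rustc --lib -- --emit=asm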
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let mode = match args.value_of("profile") { + Some("dev") | None => CompileMode::Build, + Some("test") => CompileMode::Test, + Some("bench") => CompileMode::Bench, + Some("check") => CompileMode::Check { test: false }, + Some(mode) => { + let err = format_err!( + "unknown profile: `{}`, use dev, + test, or bench", + mode + ); + return Err(CliError::new(err, 101)); + } + }; + let mut compile_opts = args.compile_options_for_single_package(config, mode)?; + let target_args = values(args, "args"); + compile_opts.target_rustc_args = if target_args.is_empty() { + None + } else { + Some(target_args) + }; + ops::compile(&ws, &compile_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/rustdoc.rs b/src/bin/cargo/commands/rustdoc.rs new file mode 100644 index 000000000..194eaaef8 --- /dev/null +++ b/src/bin/cargo/commands/rustdoc.rs @@ -0,0 +1,66 @@ +use command_prelude::*; + +use cargo::ops::{self, DocOptions}; + +pub fn cli() -> App { + subcommand("rustdoc") + .setting(AppSettings::TrailingVarArg) + .about("Build a package's documentation, using specified custom flags.") + .arg(Arg::with_name("args").multiple(true)) + .arg(opt( + "open", + "Opens the docs in a browser after the operation", + )) + .arg_package("Package to document") + .arg_jobs() + .arg_targets_all( + "Build only this package's library", + "Build only the specified binary", + "Build all binaries", + "Build only the specified example", + "Build all examples", + "Build only the specified test target", + "Build all tests", + "Build only the specified bench target", + "Build all benches", + "Build all targets (default)", + ) + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +The specified target for the current package (or package specified by SPEC if +provided) will be documented with the specified ... being passed to the +final rustdoc invocation. Dependencies will not be documented as part of this +command. Note that rustdoc will still unconditionally receive arguments such +as -L, --extern, and --crate-type, and the specified ... will simply be +added to the rustdoc invocation. + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be documented. If it is not given, then the +current package is documented. For more information on SPEC and its format, see +the `cargo help pkgid` command. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + let mut compile_opts = + args.compile_options_for_single_package(config, CompileMode::Doc { deps: false })?; + let target_args = values(args, "args"); + compile_opts.target_rustdoc_args = if target_args.is_empty() { + None + } else { + Some(target_args) + }; + let doc_opts = DocOptions { + open_result: args.is_present("open"), + compile_opts, + }; + ops::doc(&ws, &doc_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/search.rs b/src/bin/cargo/commands/search.rs new file mode 100644 index 000000000..0501d8e5f --- /dev/null +++ b/src/bin/cargo/commands/search.rs @@ -0,0 +1,30 @@ +use command_prelude::*; + +use std::cmp::min; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("search") + .about("Search packages in crates.io") + .arg(Arg::with_name("query").multiple(true)) + .arg_index() + .arg( + opt( + "limit", + "Limit the number of results (default: 10, max: 100)", + ).value_name("LIMIT"), + ) + .arg(opt("registry", "Registry to use").value_name("REGISTRY")) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let registry = args.registry(config)?; + let index = args.index(config)?; + let limit = args.value_of_u32("limit")?; + let limit = min(100, limit.unwrap_or(10)); + let query: Vec<&str> = args.values_of("query").unwrap_or_default().collect(); + let query: String = query.join("+"); + ops::search(&query, config, index, limit, registry)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/test.rs b/src/bin/cargo/commands/test.rs new file mode 100644 index 000000000..f0c352bca --- /dev/null +++ b/src/bin/cargo/commands/test.rs @@ -0,0 +1,137 @@ +use command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("test") + .alias("t") + .setting(AppSettings::TrailingVarArg) + .about("Execute all unit and integration tests of a local package") + .arg( + Arg::with_name("TESTNAME") + .help("If specified, only run tests containing this string in their names"), + ) + .arg( + Arg::with_name("args") + .help("Arguments for the test binary") + .multiple(true) + .last(true), + ) + .arg_targets_all( + "Test only this package's library", + "Test only the specified binary", + "Test all binaries", + "Check that the specified examples compile", + "Check that all examples compile", + "Test only the specified test target", + "Test all tests", + "Test only the specified bench target", + "Test all benches", + "Test all targets (default)", + ) + .arg(opt("doc", "Test only this library's documentation")) + .arg(opt("no-run", "Compile, but don't run tests")) + .arg(opt("no-fail-fast", "Run all tests regardless of failure")) + .arg_package_spec( + "Package to run tests for", + "Test all packages in the workspace", + "Exclude packages from the test", + ) + .arg_jobs() + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +The test filtering argument `TESTNAME` and all the arguments following the +two dashes (`--`) are passed to the test binaries and thus to libtest +(rustc's built in unit-test and micro-benchmarking framework). If you're +passing arguments to both Cargo and the binary, the ones after `--` go to the +binary, the ones before go to Cargo. For details about libtest's arguments see +the output of `cargo test -- --help`. 
+tests with `foo` in their name on 3 threads in parallel:
+
+    cargo test foo -- --test-threads 3
+
+If the --package argument is given, then SPEC is a package id specification
+which indicates which package should be tested. If it is not given, then the
+current package is tested. For more information on SPEC and its format, see the
+`cargo help pkgid` command.
+
+All packages in the workspace are tested if the `--all` flag is supplied. The
+`--all` flag is automatically assumed for a virtual manifest.
+Note that `--exclude` has to be specified in conjunction with the `--all` flag.
+
+The --jobs argument affects the building of the test executable but does
+not affect how many jobs are used when running the tests. The default value
+for the --jobs argument is the number of CPUs. If you want to control the
+number of simultaneously running test cases, pass the `--test-threads` option
+to the test binaries:
+
+    cargo test -- --test-threads=1
+
+Compilation can be configured via the `test` profile in the manifest.
+
+By default the Rust test harness hides output from test execution to
+keep results readable. Test output can be recovered (e.g. for debugging)
+by passing `--nocapture` to the test binaries:
+
+    cargo test -- --nocapture
+
+To get the list of all options available for the test binaries use this:
+
+    cargo test -- --help
+",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult {
+    let ws = args.workspace(config)?;
+
+    let mut compile_opts = args.compile_options(config, CompileMode::Test)?;
+    let doc = args.is_present("doc");
+    if doc {
+        compile_opts.build_config.mode = CompileMode::Doctest;
+        compile_opts.filter = ops::CompileFilter::new(
+            true,
+            Vec::new(),
+            false,
+            Vec::new(),
+            false,
+            Vec::new(),
+            false,
+            Vec::new(),
+            false,
+            false,
+        );
+    }
+
+    let ops = ops::TestOptions {
+        no_run: args.is_present("no-run"),
+        no_fail_fast: args.is_present("no-fail-fast"),
+        only_doc: doc,
+        compile_opts,
+    };
+
+    // TESTNAME is actually an argument of the test binary, but it's
+    // important so we explicitly mention it and reconfigure.
+    let mut test_args = vec![];
+    test_args.extend(args.value_of("TESTNAME").into_iter().map(|s| s.to_string()));
+    test_args.extend(
+        args.values_of("args")
+            .unwrap_or_default()
+            .map(|s| s.to_string()),
+    );
+
+    let err = ops::run_tests(&ws, &ops, &test_args)?;
+    match err {
+        None => Ok(()),
+        Some(err) => Err(match err.exit.as_ref().and_then(|e| e.code()) {
+            Some(i) => CliError::new(format_err!("{}", err.hint(&ws)), i),
+            None => CliError::new(err.into(), 101),
+        }),
+    }
+}
diff --git a/src/bin/cargo/commands/uninstall.rs b/src/bin/cargo/commands/uninstall.rs
new file mode 100644
index 000000000..203185119
--- /dev/null
+++ b/src/bin/cargo/commands/uninstall.rs
@@ -0,0 +1,26 @@
+use command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("uninstall")
+        .about("Remove a Rust binary")
+        .arg(Arg::with_name("spec").multiple(true))
+        .arg(multi_opt("bin", "NAME", "Only uninstall the binary NAME"))
+        .arg(opt("root", "Directory to uninstall packages from").value_name("DIR"))
+        .after_help(
+            "\
+The argument SPEC is a package id specification (see `cargo help pkgid`) to
+specify which crate should be uninstalled. By default all binaries are
+uninstalled for a crate but the `--bin` and `--example` flags can be used to
+only uninstall particular binaries.
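+
+For example, to remove only a hypothetical `rg` binary installed by a crate
+named `ripgrep`:
+
+    cargo uninstall ripgrep --bin rg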
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let root = args.value_of("root"); + let specs = args.values_of("spec").unwrap_or_default().collect(); + ops::uninstall(root, specs, &values(args, "bin"), config)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/update.rs b/src/bin/cargo/commands/update.rs new file mode 100644 index 000000000..c5a992a3d --- /dev/null +++ b/src/bin/cargo/commands/update.rs @@ -0,0 +1,51 @@ +use command_prelude::*; + +use cargo::ops::{self, UpdateOptions}; + +pub fn cli() -> App { + subcommand("update") + .about("Update dependencies as recorded in the local lock file") + .arg_package_spec_simple("Package to update") + .arg(opt( + "aggressive", + "Force updating all dependencies of as well", + )) + .arg(opt("precise", "Update a single dependency to exactly PRECISE").value_name("PRECISE")) + .arg_manifest_path() + .after_help( + "\ +This command requires that a `Cargo.lock` already exists as generated by +`cargo build` or related commands. + +If SPEC is given, then a conservative update of the lockfile will be +performed. This means that only the dependency specified by SPEC will be +updated. Its transitive dependencies will be updated only if SPEC cannot be +updated without updating dependencies. All other dependencies will remain +locked at their currently recorded versions. + +If PRECISE is specified, then --aggressive must not also be specified. The +argument PRECISE is a string representing a precise revision that the package +being updated should be updated to. For example, if the package comes from a git +repository, then PRECISE would be the exact revision that the repository should +be updated to. + +If SPEC is not given, then all dependencies will be re-resolved and +updated. + +For more information about package id specifications, see `cargo help pkgid`. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let ws = args.workspace(config)?; + + let update_opts = UpdateOptions { + aggressive: args.is_present("aggressive"), + precise: args.value_of("precise"), + to_update: values(args, "package"), + config, + }; + ops::update_lockfile(&ws, &update_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/verify_project.rs b/src/bin/cargo/commands/verify_project.rs new file mode 100644 index 000000000..eea65c775 --- /dev/null +++ b/src/bin/cargo/commands/verify_project.rs @@ -0,0 +1,45 @@ +use command_prelude::*; + +use std::collections::HashMap; +use std::process; +use std::fs::File; +use std::io::Read; + +use toml; + +use cargo::print_json; + +pub fn cli() -> App { + subcommand("verify-project") + .about("Check correctness of crate manifest") + .arg_manifest_path() +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + fn fail(reason: &str, value: &str) -> ! 
+        let mut h = HashMap::new();
+        h.insert(reason.to_string(), value.to_string());
+        print_json(&h);
+        process::exit(1)
+    }
+
+    let mut contents = String::new();
+    let filename = match args.root_manifest(config) {
+        Ok(filename) => filename,
+        Err(e) => fail("invalid", &e.to_string()),
+    };
+
+    let file = File::open(&filename);
+    match file.and_then(|mut f| f.read_to_string(&mut contents)) {
+        Ok(_) => {}
+        Err(e) => fail("invalid", &format!("error reading file: {}", e)),
+    };
+    if contents.parse::<toml::Value>().is_err() {
+        fail("invalid", "invalid-format");
+    }
+
+    let mut h = HashMap::new();
+    h.insert("success".to_string(), "true".to_string());
+    print_json(&h);
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/version.rs b/src/bin/cargo/commands/version.rs
new file mode 100644
index 000000000..0e9d5be52
--- /dev/null
+++ b/src/bin/cargo/commands/version.rs
@@ -0,0 +1,12 @@
+use command_prelude::*;
+
+use cargo;
+
+pub fn cli() -> App {
+    subcommand("version").about("Show version information")
+}
+
+pub fn exec(_config: &mut Config, _args: &ArgMatches) -> CliResult {
+    println!("{}", cargo::version());
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/yank.rs b/src/bin/cargo/commands/yank.rs
new file mode 100644
index 000000000..150474be8
--- /dev/null
+++ b/src/bin/cargo/commands/yank.rs
@@ -0,0 +1,43 @@
+use command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("yank")
+        .about("Remove a pushed crate from the index")
+        .arg(Arg::with_name("crate"))
+        .arg(opt("vers", "The version to yank or un-yank").value_name("VERSION"))
+        .arg(opt(
+            "undo",
+            "Undo a yank, putting a version back into the index",
+        ))
+        .arg(opt("index", "Registry index to yank from").value_name("INDEX"))
+        .arg(opt("token", "API token to use when authenticating").value_name("TOKEN"))
+        .arg(opt("registry", "Registry to use").value_name("REGISTRY"))
+        .after_help(
+            "\
+The yank command removes a previously pushed crate's version from the server's
+index. This command does not delete any data, and the crate will still be
+available for download via the registry's download link.
+
+Note that existing crates locked to a yanked version will still be able to
+download the yanked version to use it. Cargo will, however, not allow any new
+crates to be locked to any yanked version.
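+
+For example, yanking (and then un-yanking) a hypothetical version 1.0.1 of a
+crate named `foo` would look like:
+
+    cargo yank --vers 1.0.1 foo
+    cargo yank --undo --vers 1.0.1 foo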
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches) -> CliResult { + let registry = args.registry(config)?; + + ops::yank( + config, + args.value_of("crate").map(|s| s.to_string()), + args.value_of("vers").map(|s| s.to_string()), + args.value_of("token").map(|s| s.to_string()), + args.value_of("index").map(|s| s.to_string()), + args.is_present("undo"), + registry, + )?; + Ok(()) +} diff --git a/src/bin/cargo/main.rs b/src/bin/cargo/main.rs new file mode 100644 index 000000000..7d7c7db6e --- /dev/null +++ b/src/bin/cargo/main.rs @@ -0,0 +1,212 @@ +extern crate cargo; +extern crate clap; +extern crate env_logger; +#[macro_use] +extern crate failure; +extern crate git2_curl; +extern crate log; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; +extern crate toml; + +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; +use std::collections::BTreeSet; + +use cargo::core::shell::Shell; +use cargo::util::{self, lev_distance, CargoResult, CliResult, Config}; +use cargo::util::{CliError, ProcessError}; + +mod cli; +mod command_prelude; +mod commands; + +fn main() { + env_logger::init(); + + let mut config = match Config::default() { + Ok(cfg) => cfg, + Err(e) => { + let mut shell = Shell::new(); + cargo::exit_with_error(e.into(), &mut shell) + } + }; + + let result = { + init_git_transports(&mut config); + let _token = cargo::util::job::setup(); + cli::main(&mut config) + }; + + match result { + Err(e) => cargo::exit_with_error(e, &mut *config.shell()), + Ok(()) => {} + } +} + +fn aliased_command(config: &Config, command: &str) -> CargoResult>> { + let alias_name = format!("alias.{}", command); + let mut result = Ok(None); + match config.get_string(&alias_name) { + Ok(value) => { + if let Some(record) = value { + let alias_commands = record + .val + .split_whitespace() + .map(|s| s.to_string()) + .collect(); + result = Ok(Some(alias_commands)); + } + } + Err(_) => { + let value = config.get_list(&alias_name)?; + if let Some(record) = value { + let alias_commands: Vec = + record.val.iter().map(|s| s.0.to_string()).collect(); + result = Ok(Some(alias_commands)); + } + } + } + result +} + +/// List all runnable commands +fn list_commands(config: &Config) -> BTreeSet<(String, Option)> { + let prefix = "cargo-"; + let suffix = env::consts::EXE_SUFFIX; + let mut commands = BTreeSet::new(); + for dir in search_directories(config) { + let entries = match fs::read_dir(dir) { + Ok(entries) => entries, + _ => continue, + }; + for entry in entries.filter_map(|e| e.ok()) { + let path = entry.path(); + let filename = match path.file_name().and_then(|s| s.to_str()) { + Some(filename) => filename, + _ => continue, + }; + if !filename.starts_with(prefix) || !filename.ends_with(suffix) { + continue; + } + if is_executable(entry.path()) { + let end = filename.len() - suffix.len(); + commands.insert(( + filename[prefix.len()..end].to_string(), + Some(path.display().to_string()), + )); + } + } + } + + for cmd in commands::builtin() { + commands.insert((cmd.get_name().to_string(), None)); + } + + commands +} + +fn find_closest(config: &Config, cmd: &str) -> Option { + let cmds = list_commands(config); + // Only consider candidates with a lev_distance of 3 or less so we don't + // suggest out-of-the-blue options. 
+    let mut filtered = cmds.iter()
+        .map(|&(ref c, _)| (lev_distance(c, cmd), c))
+        .filter(|&(d, _)| d < 4)
+        .collect::<Vec<_>>();
+    filtered.sort_by(|a, b| a.0.cmp(&b.0));
+    filtered.get(0).map(|slot| slot.1.clone())
+}
+
+fn execute_external_subcommand(config: &Config, cmd: &str, args: &[&str]) -> CliResult {
+    let command_exe = format!("cargo-{}{}", cmd, env::consts::EXE_SUFFIX);
+    let path = search_directories(config)
+        .iter()
+        .map(|dir| dir.join(&command_exe))
+        .find(|file| is_executable(file));
+    let command = match path {
+        Some(command) => command,
+        None => {
+            let err = match find_closest(config, cmd) {
+                Some(closest) => format_err!(
+                    "no such subcommand: `{}`\n\n\tDid you mean `{}`?\n",
+                    cmd,
+                    closest
+                ),
+                None => format_err!("no such subcommand: `{}`", cmd),
+            };
+            return Err(CliError::new(err, 101));
+        }
+    };
+
+    let cargo_exe = config.cargo_exe()?;
+    let err = match util::process(&command)
+        .env(cargo::CARGO_ENV, cargo_exe)
+        .args(args)
+        .exec_replace()
+    {
+        Ok(()) => return Ok(()),
+        Err(e) => e,
+    };
+
+    if let Some(perr) = err.downcast_ref::<ProcessError>() {
+        if let Some(code) = perr.exit.as_ref().and_then(|c| c.code()) {
+            return Err(CliError::code(code));
+        }
+    }
+    Err(CliError::new(err, 101))
+}
+
+#[cfg(unix)]
+fn is_executable<P: AsRef<Path>>(path: P) -> bool {
+    use std::os::unix::prelude::*;
+    fs::metadata(path)
+        .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111 != 0)
+        .unwrap_or(false)
+}
+#[cfg(windows)]
+fn is_executable<P: AsRef<Path>>(path: P) -> bool {
+    fs::metadata(path)
+        .map(|metadata| metadata.is_file())
+        .unwrap_or(false)
+}
+
+fn search_directories(config: &Config) -> Vec<PathBuf> {
+    let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")];
+    if let Some(val) = env::var_os("PATH") {
+        dirs.extend(env::split_paths(&val));
+    }
+    dirs
+}
+
+fn init_git_transports(config: &Config) {
+    // Only use a custom transport if any HTTP options are specified,
+    // such as proxies or custom certificate authorities. The custom
+    // transport, however, is not as well battle-tested.
+
+    match cargo::ops::needs_custom_http_transport(config) {
+        Ok(true) => {}
+        _ => return,
+    }
+
+    let handle = match cargo::ops::http_handle(config) {
+        Ok(handle) => handle,
+        Err(..) => return,
+    };
+
+    // The unsafety of the registration function derives from two aspects:
+    //
+    // 1. This call must be synchronized with all other registration calls as
+    //    well as construction of new transports.
+    // 2. The argument is leaked.
+    //
+    // We're clear on point (1) because this is only called at the start of this
+    // binary (we know what the state of the world looks like) and we're mostly
+    // clear on point (2) because we'd only free it after everything is done
+    // anyway.
+    unsafe {
+        git2_curl::register(handle);
+    }
+}
diff --git a/src/cargo/core/compiler/build_config.rs b/src/cargo/core/compiler/build_config.rs
new file mode 100644
index 000000000..e4d1377b7
--- /dev/null
+++ b/src/cargo/core/compiler/build_config.rs
@@ -0,0 +1,190 @@
+use std::path::Path;
+use util::{CargoResult, CargoResultExt, Config};
+
+/// Configuration information for a rustc build.
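+///
+/// As a rough sketch of typical contents: a hypothetical `cargo build
+/// --release -j 4` invocation ends up described here with `jobs: 4`,
+/// `release: true` (filled in by the caller) and `mode: CompileMode::Build`.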
+#[derive(Debug)]
+pub struct BuildConfig {
+    /// The target arch triple, defaults to host arch
+    pub requested_target: Option<String>,
+    /// How many rustc jobs to run in parallel
+    pub jobs: u32,
+    /// Whether we are building for release
+    pub release: bool,
+    /// In what mode we are compiling
+    pub mode: CompileMode,
+    /// Whether to print std output in json format (for machine reading)
+    pub message_format: MessageFormat,
+}
+
+impl BuildConfig {
+    /// Parse all config files to learn about build configuration. Currently
+    /// configured options are:
+    ///
+    /// * build.jobs
+    /// * build.target
+    /// * target.$target.ar
+    /// * target.$target.linker
+    /// * target.$target.libfoo.metadata
+    pub fn new(
+        config: &Config,
+        jobs: Option<u32>,
+        requested_target: &Option<String>,
+        mode: CompileMode,
+    ) -> CargoResult<BuildConfig> {
+        let requested_target = match requested_target {
+            &Some(ref target) if target.ends_with(".json") => {
+                let path = Path::new(target)
+                    .canonicalize()
+                    .chain_err(|| format_err!("Target path {:?} is not a valid file", target))?;
+                Some(path.into_os_string()
+                    .into_string()
+                    .map_err(|_| format_err!("Target path is not valid unicode"))?)
+            }
+            other => other.clone(),
+        };
+        if let Some(ref s) = requested_target {
+            if s.trim().is_empty() {
+                bail!("target was empty")
+            }
+        }
+        let cfg_target = config.get_string("build.target")?.map(|s| s.val);
+        let target = requested_target.clone().or(cfg_target);
+
+        if jobs == Some(0) {
+            bail!("jobs must be at least 1")
+        }
+        if jobs.is_some() && config.jobserver_from_env().is_some() {
+            config.shell().warn(
+                "a `-j` argument was passed to Cargo but Cargo is \
+                 also configured with an external jobserver in \
+                 its environment, ignoring the `-j` parameter",
+            )?;
+        }
+        let cfg_jobs = match config.get_i64("build.jobs")? {
+            Some(v) => {
+                if v.val <= 0 {
+                    bail!(
+                        "build.jobs must be positive, but found {} in {}",
+                        v.val,
+                        v.definition
+                    )
+                } else if v.val >= i64::from(u32::max_value()) {
+                    bail!(
+                        "build.jobs is too large: found {} in {}",
+                        v.val,
+                        v.definition
+                    )
+                } else {
+                    Some(v.val as u32)
+                }
+            }
+            None => None,
+        };
+        let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
+        Ok(BuildConfig {
+            requested_target: target,
+            jobs,
+            release: false,
+            mode,
+            message_format: MessageFormat::Human,
+        })
+    }
+
+    pub fn json_messages(&self) -> bool {
+        self.message_format == MessageFormat::Json
+    }
+
+    pub fn test(&self) -> bool {
+        self.mode == CompileMode::Test || self.mode == CompileMode::Bench
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum MessageFormat {
+    Human,
+    Json,
+}
+
+/// The general "mode" of what to do.
+/// This is used for two purposes. The commands themselves pass this in to
+/// `compile_ws` to tell it the general execution strategy. This influences
+/// the default targets selected. The other use is in the `Unit` struct
+/// to indicate what is being done with a specific target.
+#[derive(Clone, Copy, PartialEq, Debug, Eq, Hash)]
+pub enum CompileMode {
+    /// A target being built for a test.
+    Test,
+    /// Building a target with `rustc` (lib or bin).
+    Build,
+    /// Building a target with `rustc` to emit `rmeta` metadata only. If
+    /// `test` is true, then it is also compiled with `--test` to check it like
+    /// a test.
+    Check { test: bool },
+    /// Used to indicate benchmarks should be built. This is not used in
+    /// `Target` because it is essentially the same as `Test` (indicating
+    /// `--test` should be passed to rustc) and by using `Test` instead it
+    /// allows some de-duping of Units to occur.
+    Bench,
+    /// A target that will be documented with `rustdoc`.
+    /// If `deps` is true, then it will also document all dependencies.
+    Doc { deps: bool },
+    /// A target that will be tested with `rustdoc`.
+    Doctest,
+    /// A marker for Units that represent the execution of a `build.rs`
+    /// script.
+    RunCustomBuild,
+}
+
+impl CompileMode {
+    /// Returns true if the unit is being checked.
+    pub fn is_check(&self) -> bool {
+        match *self {
+            CompileMode::Check { .. } => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this is a doc or doctest. Be careful using this.
+    /// Although both run rustdoc, the dependencies for those two modes are
+    /// very different.
+    pub fn is_doc(&self) -> bool {
+        match *self {
+            CompileMode::Doc { .. } | CompileMode::Doctest => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this is any type of test (test, benchmark, doctest, or
+    /// check-test).
+    pub fn is_any_test(&self) -> bool {
+        match *self {
+            CompileMode::Test
+            | CompileMode::Bench
+            | CompileMode::Check { test: true }
+            | CompileMode::Doctest => true,
+            _ => false,
+        }
+    }
+
+    /// Returns true if this is the *execution* of a `build.rs` script.
+    pub fn is_run_custom_build(&self) -> bool {
+        *self == CompileMode::RunCustomBuild
+    }
+
+    /// List of all modes (currently used by `cargo clean -p` for computing
+    /// all possible outputs).
+    pub fn all_modes() -> &'static [CompileMode] {
+        static ALL: [CompileMode; 9] = [
+            CompileMode::Test,
+            CompileMode::Build,
+            CompileMode::Check { test: true },
+            CompileMode::Check { test: false },
+            CompileMode::Bench,
+            CompileMode::Doc { deps: true },
+            CompileMode::Doc { deps: false },
+            CompileMode::Doctest,
+            CompileMode::RunCustomBuild,
+        ];
+        &ALL
+    }
+}
diff --git a/src/cargo/core/compiler/build_context/mod.rs b/src/cargo/core/compiler/build_context/mod.rs
new file mode 100644
index 000000000..1e7112b9a
--- /dev/null
+++ b/src/cargo/core/compiler/build_context/mod.rs
@@ -0,0 +1,452 @@
+use std::collections::HashMap;
+use std::env;
+use std::path::{Path, PathBuf};
+use std::str::{self, FromStr};
+
+use core::profiles::Profiles;
+use core::{Dependency, Workspace};
+use core::{Package, PackageId, PackageSet, Resolve};
+use util::errors::CargoResult;
+use util::{profile, Cfg, CfgExpr, Config, Rustc};
+
+use super::{BuildConfig, BuildOutput, Kind, Unit};
+
+mod target_info;
+pub use self::target_info::{FileFlavor, TargetInfo};
+
+/// The build context, containing all information about a build task
+pub struct BuildContext<'a, 'cfg: 'a> {
+    /// The workspace the build is for
+    pub ws: &'a Workspace<'cfg>,
+    /// The cargo configuration
+    pub config: &'cfg Config,
+    /// The dependency graph for our build
+    pub resolve: &'a Resolve,
+    pub profiles: &'a Profiles,
+    pub build_config: &'a BuildConfig,
+    /// This is a workaround to carry the extra compiler args for either
+    /// `rustc` or `rustdoc` given on the command-line for the commands `cargo
+    /// rustc` and `cargo rustdoc`. These commands only support one target,
+    /// but we don't want the args passed to any dependencies, so we include
+    /// the `Unit` corresponding to the top-level target.
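+    /// For example, a hypothetical `cargo rustc -- --cfg fuzzing` records
+    /// the top-level `Unit` together with `["--cfg", "fuzzing"]` here, so
+    /// only that unit receives the extra flags.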
+    pub extra_compiler_args: Option<(Unit<'a>, Vec<String>)>,
+    pub packages: &'a PackageSet<'cfg>,
+
+    /// Information about the compiler
+    pub rustc: Rustc,
+    /// Build information for the host arch
+    pub host_config: TargetConfig,
+    /// Build information for the target
+    pub target_config: TargetConfig,
+    pub target_info: TargetInfo,
+    pub host_info: TargetInfo,
+    pub incremental_env: Option<bool>,
+}
+
+impl<'a, 'cfg> BuildContext<'a, 'cfg> {
+    pub fn new(
+        ws: &'a Workspace<'cfg>,
+        resolve: &'a Resolve,
+        packages: &'a PackageSet<'cfg>,
+        config: &'cfg Config,
+        build_config: &'a BuildConfig,
+        profiles: &'a Profiles,
+        extra_compiler_args: Option<(Unit<'a>, Vec<String>)>,
+    ) -> CargoResult<BuildContext<'a, 'cfg>> {
+        let incremental_env = match env::var("CARGO_INCREMENTAL") {
+            Ok(v) => Some(v == "1"),
+            Err(_) => None,
+        };
+
+        let rustc = config.rustc(Some(ws))?;
+        let host_config = TargetConfig::new(config, &rustc.host)?;
+        let target_config = match build_config.requested_target.as_ref() {
+            Some(triple) => TargetConfig::new(config, triple)?,
+            None => host_config.clone(),
+        };
+        let (host_info, target_info) = {
+            let _p = profile::start("BuildContext::probe_target_info");
+            debug!("probe_target_info");
+            let host_info =
+                TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Host)?;
+            let target_info =
+                TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Target)?;
+            (host_info, target_info)
+        };
+
+        Ok(BuildContext {
+            ws,
+            resolve,
+            packages,
+            config,
+            rustc,
+            target_config,
+            target_info,
+            host_config,
+            host_info,
+            build_config,
+            profiles,
+            incremental_env,
+            extra_compiler_args,
+        })
+    }
+
+    pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult<String> {
+        let deps = {
+            let a = unit.pkg.package_id();
+            let b = dep.pkg.package_id();
+            if a == b {
+                &[]
+            } else {
+                self.resolve.dependencies_listed(a, b)
+            }
+        };
+
+        let crate_name = dep.target.crate_name();
+        let mut names = deps.iter()
+            .map(|d| d.rename().unwrap_or(&crate_name));
+        let name = names.next().unwrap_or(&crate_name);
+        for n in names {
+            if n == name {
+                continue
+            }
+            bail!("multiple dependencies listed for the same crate must \
+                   all have the same name, but the dependency on `{}` \
+                   is listed as having different names", dep.pkg.package_id());
+        }
+        Ok(name.to_string())
+    }
+
+    /// Whether a dependency should be compiled for the host or target platform,
+    /// specified by `Kind`.
+    pub fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
+        // If this dependency is only available for certain platforms,
+        // make sure we're only enabling it for that platform.
+        let platform = match dep.platform() {
+            Some(p) => p,
+            None => return true,
+        };
+        let (name, info) = match kind {
+            Kind::Host => (self.host_triple(), &self.host_info),
+            Kind::Target => (self.target_triple(), &self.target_info),
+        };
+        platform.matches(name, info.cfg())
+    }
+
+    /// Gets a package for the given package id.
+    pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> {
+        self.packages.get(id)
+    }
+
+    /// Get the user-specified linker for a particular host or target
+    pub fn linker(&self, kind: Kind) -> Option<&Path> {
+        self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
+    }
+
+    /// Get the user-specified `ar` program for a particular host or target
+    pub fn ar(&self, kind: Kind) -> Option<&Path> {
+        self.target_config(kind).ar.as_ref().map(|s| s.as_ref())
+    }
+
+    /// Get the list of cfg printed out from the compiler for the specified kind
+    pub fn cfg(&self, kind: Kind) -> &[Cfg] {
+        let info = match kind {
+            Kind::Host => &self.host_info,
+            Kind::Target => &self.target_info,
+        };
+        info.cfg().unwrap_or(&[])
+    }
+
+    /// The host arch triple
+    ///
+    /// e.g. x86_64-unknown-linux-gnu, would be
+    /// - machine: x86_64
+    /// - hardware-platform: unknown
+    /// - operating system: linux-gnu
+    pub fn host_triple(&self) -> &str {
+        &self.rustc.host
+    }
+
+    pub fn target_triple(&self) -> &str {
+        self.build_config
+            .requested_target
+            .as_ref()
+            .map(|s| s.as_str())
+            .unwrap_or(self.host_triple())
+    }
+
+    /// Get the target configuration for a particular host or target
+    fn target_config(&self, kind: Kind) -> &TargetConfig {
+        match kind {
+            Kind::Host => &self.host_config,
+            Kind::Target => &self.target_config,
+        }
+    }
+
+    /// Number of jobs specified for this build
+    pub fn jobs(&self) -> u32 {
+        self.build_config.jobs
+    }
+
+    pub fn rustflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+        env_args(
+            self.config,
+            &self.build_config.requested_target,
+            self.host_triple(),
+            self.info(&unit.kind).cfg(),
+            unit.kind,
+            "RUSTFLAGS",
+        )
+    }
+
+    pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult<Vec<String>> {
+        env_args(
+            self.config,
+            &self.build_config.requested_target,
+            self.host_triple(),
+            self.info(&unit.kind).cfg(),
+            unit.kind,
+            "RUSTDOCFLAGS",
+        )
+    }
+
+    pub fn show_warnings(&self, pkg: &PackageId) -> bool {
+        pkg.source_id().is_path() || self.config.extra_verbose()
+    }
+
+    fn info(&self, kind: &Kind) -> &TargetInfo {
+        match *kind {
+            Kind::Host => &self.host_info,
+            Kind::Target => &self.target_info,
+        }
+    }
+
+    pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec<String>> {
+        if let Some((ref args_unit, ref args)) = self.extra_compiler_args {
+            if args_unit == unit {
+                return Some(args);
+            }
+        }
+        None
+    }
+}
+
+/// Information required to build for a target
+#[derive(Clone, Default)]
+pub struct TargetConfig {
+    /// The path of archiver (lib builder) for this target.
+    pub ar: Option<PathBuf>,
+    /// The path of the linker for this target.
+    pub linker: Option<PathBuf>,
+    /// Special build options for any necessary input files (filename -> options)
+    pub overrides: HashMap<String, BuildOutput>,
+}
+
+impl TargetConfig {
+    pub fn new(config: &Config, triple: &str) -> CargoResult<TargetConfig> {
+        let key = format!("target.{}", triple);
+        let mut ret = TargetConfig {
+            ar: config.get_path(&format!("{}.ar", key))?.map(|v| v.val),
+            linker: config.get_path(&format!("{}.linker", key))?.map(|v| v.val),
+            overrides: HashMap::new(),
+        };
+        let table = match config.get_table(&key)? {
+            Some(table) => table.val,
+            None => return Ok(ret),
+        };
+        for (lib_name, value) in table {
+            match lib_name.as_str() {
+                "ar" | "linker" | "runner" | "rustflags" => continue,
+                _ => {}
+            }
+
+            let mut output = BuildOutput {
+                library_paths: Vec::new(),
+                library_links: Vec::new(),
+                cfgs: Vec::new(),
+                env: Vec::new(),
+                metadata: Vec::new(),
+                rerun_if_changed: Vec::new(),
+                rerun_if_env_changed: Vec::new(),
+                warnings: Vec::new(),
+            };
+            // We require deterministic order of evaluation, so we must sort the pairs by key first.
+            let mut pairs = Vec::new();
+            for (k, value) in value.table(&lib_name)?.0 {
+                pairs.push((k, value));
+            }
+            pairs.sort_by_key(|p| p.0);
+            for (k, value) in pairs {
+                let key = format!("{}.{}", key, k);
+                match &k[..] {
+                    "rustc-flags" => {
+                        let (flags, definition) = value.string(k)?;
+                        let whence = format!("in `{}` (in {})", key, definition.display());
+                        let (paths, links) = BuildOutput::parse_rustc_flags(flags, &whence)?;
+                        output.library_paths.extend(paths);
+                        output.library_links.extend(links);
+                    }
+                    "rustc-link-lib" => {
+                        let list = value.list(k)?;
+                        output
+                            .library_links
+                            .extend(list.iter().map(|v| v.0.clone()));
+                    }
+                    "rustc-link-search" => {
+                        let list = value.list(k)?;
+                        output
+                            .library_paths
+                            .extend(list.iter().map(|v| PathBuf::from(&v.0)));
+                    }
+                    "rustc-cfg" => {
+                        let list = value.list(k)?;
+                        output.cfgs.extend(list.iter().map(|v| v.0.clone()));
+                    }
+                    "rustc-env" => for (name, val) in value.table(k)?.0 {
+                        let val = val.string(name)?.0;
+                        output.env.push((name.clone(), val.to_string()));
+                    },
+                    "warning" | "rerun-if-changed" | "rerun-if-env-changed" => {
+                        bail!("`{}` is not supported in build script overrides", k);
+                    }
+                    _ => {
+                        let val = value.string(k)?.0;
+                        output.metadata.push((k.clone(), val.to_string()));
+                    }
+                }
+            }
+            ret.overrides.insert(lib_name, output);
+        }
+
+        Ok(ret)
+    }
+}
+
+/// Acquire extra flags to pass to the compiler from various locations.
+///
+/// The locations are:
+///
+///  - the `RUSTFLAGS` environment variable
+///
+/// then if this was not found
+///
+///  - `target.*.rustflags` from the manifest (Cargo.toml)
+///  - `target.cfg(..).rustflags` from the manifest
+///
+/// then if neither of these were found
+///
+///  - `build.rustflags` from the manifest
+///
+/// Note that if a `target` is specified, no args will be passed to host code (plugins, build
+/// scripts, ...), even if it is the same as the target.
+fn env_args(
+    config: &Config,
+    requested_target: &Option<String>,
+    host_triple: &str,
+    target_cfg: Option<&[Cfg]>,
+    kind: Kind,
+    name: &str,
+) -> CargoResult<Vec<String>> {
+    // We *want* to apply RUSTFLAGS only to builds for the
+    // requested target architecture, and not to things like build
+    // scripts and plugins, which may be for an entirely different
+    // architecture. Cargo's present architecture makes it quite
+    // hard to only apply flags to things that are not build
+    // scripts and plugins though, so we do something more hacky
+    // instead to avoid applying the same RUSTFLAGS to multiple targets
+    // arches:
+    //
+    // 1) If --target is not specified we just apply RUSTFLAGS to
+    //    all builds; they are all going to have the same target.
+    //
+    // 2) If --target *is* specified then we only apply RUSTFLAGS
+    //    to compilation units with the Target kind, which indicates
+    //    it was chosen by the --target flag.
+    //
+    // This means that, e.g. even if the specified --target is the
+    // same as the host, build scripts and plugins won't get
+    // RUSTFLAGS.
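+    //
+    // A concrete illustration of the two rules above (shell syntax assumed):
+    //
+    //     RUSTFLAGS="-C target-cpu=native" cargo build
+    //         --> flags reach every unit, since no --target was given
+    //     RUSTFLAGS="-C target-cpu=native" cargo build --target <triple>
+    //         --> flags reach only Kind::Target units; build scripts and
+    //             plugins (Kind::Host) are compiled without them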
+    let compiling_with_target = requested_target.is_some();
+    let is_target_kind = kind == Kind::Target;
+
+    if compiling_with_target && !is_target_kind {
+        // This is probably a build script or plugin and we're
+        // compiling with --target. In this scenario there are
+        // no rustflags we can apply.
+        return Ok(Vec::new());
+    }
+
+    // First try RUSTFLAGS from the environment
+    if let Ok(a) = env::var(name) {
+        let args = a.split(' ')
+            .map(str::trim)
+            .filter(|s| !s.is_empty())
+            .map(str::to_string);
+        return Ok(args.collect());
+    }
+
+    let mut rustflags = Vec::new();
+
+    let name = name.chars()
+        .flat_map(|c| c.to_lowercase())
+        .collect::<String>();
+    // Then the target.*.rustflags value...
+    let target = requested_target
+        .as_ref()
+        .map(|s| s.as_str())
+        .unwrap_or(host_triple);
+    let key = format!("target.{}.{}", target, name);
+    if let Some(args) = config.get_list_or_split_string(&key)? {
+        let args = args.val.into_iter();
+        rustflags.extend(args);
+    }
+    // ...including target.'cfg(...)'.rustflags
+    if let Some(target_cfg) = target_cfg {
+        if let Some(table) = config.get_table("target")? {
+            let cfgs = table.val.keys().filter_map(|t| {
+                if t.starts_with("cfg(") && t.ends_with(')') {
+                    let cfg = &t[4..t.len() - 1];
+                    CfgExpr::from_str(cfg).ok().and_then(|c| {
+                        if c.matches(target_cfg) {
+                            Some(t)
+                        } else {
+                            None
+                        }
+                    })
+                } else {
+                    None
+                }
+            });
+
+            // Note that we may have multiple matching `[target]` sections and
+            // because we're passing flags to the compiler this can affect
+            // cargo's caching and whether it rebuilds. Ensure a deterministic
+            // ordering through sorting for now. We may perhaps one day wish to
+            // ensure a deterministic ordering via the order keys were defined
+            // in files perhaps.
+            let mut cfgs = cfgs.collect::<Vec<_>>();
+            cfgs.sort();
+
+            for n in cfgs {
+                let key = format!("target.{}.{}", n, name);
+                if let Some(args) = config.get_list_or_split_string(&key)? {
+                    let args = args.val.into_iter();
+                    rustflags.extend(args);
+                }
+            }
+        }
+    }
+
+    if !rustflags.is_empty() {
+        return Ok(rustflags);
+    }
+
+    // Then the build.rustflags value
+    let key = format!("build.{}", name);
+    if let Some(args) = config.get_list_or_split_string(&key)? {
+        let args = args.val.into_iter();
+        return Ok(args.collect());
+    }
+
+    Ok(Vec::new())
+}
diff --git a/src/cargo/core/compiler/build_context/target_info.rs b/src/cargo/core/compiler/build_context/target_info.rs
new file mode 100644
index 000000000..84950bd0d
--- /dev/null
+++ b/src/cargo/core/compiler/build_context/target_info.rs
@@ -0,0 +1,291 @@
+use std::cell::RefCell;
+use std::collections::hash_map::{Entry, HashMap};
+use std::path::PathBuf;
+use std::str::{self, FromStr};
+
+use super::env_args;
+use util::{CargoResult, CargoResultExt, Cfg, Config, ProcessBuilder, Rustc};
+use core::TargetKind;
+use super::Kind;
+
+#[derive(Clone)]
+pub struct TargetInfo {
+    crate_type_process: Option<ProcessBuilder>,
+    crate_types: RefCell<HashMap<String, Option<(String, String)>>>,
+    cfg: Option<Vec<Cfg>>,
+    pub sysroot_libdir: Option<PathBuf>,
+}
+
+/// Type of each file generated by a Unit.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum FileFlavor {
+    /// Not a special file type.
+    Normal,
+    /// It is something you can link against (e.g. a library)
+    Linkable,
+    /// It is a piece of external debug information (e.g. *.dSYM and *.pdb)
+    DebugInfo,
+}
+
+pub struct FileType {
+    pub flavor: FileFlavor,
+    suffix: String,
+    prefix: String,
+    // wasm bin target will generate two files in deps such as
+    // "web-stuff.js" and "web_stuff.wasm". Note the different usages of
+    // "-" and "_". should_replace_hyphens is a flag to indicate that
+    // we need to convert the stem "web-stuff" to "web_stuff", so we
+    // won't miss "web_stuff.wasm".
+    should_replace_hyphens: bool,
+}
+
+impl FileType {
+    pub fn filename(&self, stem: &str) -> String {
+        let stem = if self.should_replace_hyphens {
+            stem.replace("-", "_")
+        } else {
+            stem.to_string()
+        };
+        format!("{}{}{}", self.prefix, stem, self.suffix)
+    }
+}
+
+impl TargetInfo {
+    pub fn new(
+        config: &Config,
+        requested_target: &Option<String>,
+        rustc: &Rustc,
+        kind: Kind,
+    ) -> CargoResult<TargetInfo> {
+        let rustflags = env_args(
+            config,
+            requested_target,
+            &rustc.host,
+            None,
+            kind,
+            "RUSTFLAGS",
+        )?;
+        let mut process = rustc.process();
+        process
+            .arg("-")
+            .arg("--crate-name")
+            .arg("___")
+            .arg("--print=file-names")
+            .args(&rustflags)
+            .env_remove("RUST_LOG");
+
+        let target_triple = requested_target
+            .as_ref()
+            .map(|s| s.as_str())
+            .unwrap_or(&rustc.host);
+        if kind == Kind::Target {
+            process.arg("--target").arg(target_triple);
+        }
+
+        let crate_type_process = process.clone();
+        const KNOWN_CRATE_TYPES: &[&str] =
+            &["bin", "rlib", "dylib", "cdylib", "staticlib", "proc-macro"];
+        for crate_type in KNOWN_CRATE_TYPES.iter() {
+            process.arg("--crate-type").arg(crate_type);
+        }
+
+        let mut with_cfg = process.clone();
+        with_cfg.arg("--print=sysroot");
+        with_cfg.arg("--print=cfg");
+
+        let mut has_cfg_and_sysroot = true;
+        let (output, error) = rustc
+            .cached_output(&with_cfg)
+            .or_else(|_| {
+                has_cfg_and_sysroot = false;
+                rustc.cached_output(&process)
+            })
+            .chain_err(|| "failed to run `rustc` to learn about target-specific information")?;
+
+        let mut lines = output.lines();
+        let mut map = HashMap::new();
+        for crate_type in KNOWN_CRATE_TYPES {
+            let out = parse_crate_type(crate_type, &error, &mut lines)?;
+            map.insert(crate_type.to_string(), out);
+        }
+
+        let mut sysroot_libdir = None;
+        if has_cfg_and_sysroot {
+            let line = match lines.next() {
+                Some(line) => line,
+                None => bail!(
+                    "output of --print=sysroot missing when learning about \
+                     target-specific information from rustc"
+                ),
+            };
+            let mut rustlib = PathBuf::from(line);
+            if kind == Kind::Host {
+                if cfg!(windows) {
+                    rustlib.push("bin");
+                } else {
+                    rustlib.push("lib");
+                }
+                sysroot_libdir = Some(rustlib);
+            } else {
+                rustlib.push("lib");
+                rustlib.push("rustlib");
+                rustlib.push(target_triple);
+                rustlib.push("lib");
+                sysroot_libdir = Some(rustlib);
+            }
+        }
+
+        let cfg = if has_cfg_and_sysroot {
+            Some(lines.map(Cfg::from_str).collect::<CargoResult<Vec<_>>>()?)
+        } else {
+            None
+        };
+
+        Ok(TargetInfo {
+            crate_type_process: Some(crate_type_process),
+            crate_types: RefCell::new(map),
+            cfg,
+            sysroot_libdir,
+        })
+    }
+
+    pub fn cfg(&self) -> Option<&[Cfg]> {
+        self.cfg.as_ref().map(|v| v.as_ref())
+    }
+
+    pub fn file_types(
+        &self,
+        crate_type: &str,
+        flavor: FileFlavor,
+        kind: &TargetKind,
+        target_triple: &str,
+    ) -> CargoResult<Option<Vec<FileType>>> {
+        let mut crate_types = self.crate_types.borrow_mut();
+        let entry = crate_types.entry(crate_type.to_string());
+        let crate_type_info = match entry {
+            Entry::Occupied(o) => &*o.into_mut(),
+            Entry::Vacant(v) => {
+                let value = self.discover_crate_type(v.key())?;
+                &*v.insert(value)
+            }
+        };
+        let (prefix, suffix) = match *crate_type_info {
+            Some((ref prefix, ref suffix)) => (prefix, suffix),
+            None => return Ok(None),
+        };
+        let mut ret = vec![
+            FileType {
+                suffix: suffix.clone(),
+                prefix: prefix.clone(),
+                flavor,
+                should_replace_hyphens: false,
+            },
+        ];
+
+        // rust-lang/cargo#4500
+        if target_triple.ends_with("pc-windows-msvc") && crate_type.ends_with("dylib")
+            && suffix == ".dll"
+        {
+            ret.push(FileType {
+                suffix: ".dll.lib".to_string(),
+                prefix: prefix.clone(),
+                flavor: FileFlavor::Normal,
+                should_replace_hyphens: false,
+            })
+        }
+
+        // rust-lang/cargo#4535
+        if target_triple.starts_with("wasm32-") && crate_type == "bin" && suffix == ".js" {
+            ret.push(FileType {
+                suffix: ".wasm".to_string(),
+                prefix: prefix.clone(),
+                flavor: FileFlavor::Normal,
+                should_replace_hyphens: true,
+            })
+        }
+
+        // rust-lang/cargo#4490, rust-lang/cargo#4960
+        //  - only uplift debuginfo for binaries.
+        //    tests are run directly from target/debug/deps/
+        //    and examples are inside target/debug/examples/ which already have symbols next to them
+        //    so no need to do anything.
+        if *kind == TargetKind::Bin {
+            if target_triple.contains("-apple-") {
+                ret.push(FileType {
+                    suffix: ".dSYM".to_string(),
+                    prefix: prefix.clone(),
+                    flavor: FileFlavor::DebugInfo,
+                    should_replace_hyphens: false,
+                })
+            } else if target_triple.ends_with("-msvc") {
+                ret.push(FileType {
+                    suffix: ".pdb".to_string(),
+                    prefix: prefix.clone(),
+                    flavor: FileFlavor::DebugInfo,
+                    should_replace_hyphens: false,
+                })
+            }
+        }
+
+        Ok(Some(ret))
+    }
+
+    fn discover_crate_type(&self, crate_type: &str) -> CargoResult<Option<(String, String)>> {
+        let mut process = self.crate_type_process.clone().unwrap();
+
+        process.arg("--crate-type").arg(crate_type);
+
+        let output = process.exec_with_output().chain_err(|| {
+            format!(
+                "failed to run `rustc` to learn about \
+                 crate-type {} information",
+                crate_type
+            )
+        })?;
+
+        let error = str::from_utf8(&output.stderr).unwrap();
+        let output = str::from_utf8(&output.stdout).unwrap();
+        Ok(parse_crate_type(crate_type, error, &mut output.lines())?)
+    }
+}
+
+/// Takes rustc output (using specialized command line args), and calculates the file prefix and
+/// suffix for the given crate type, or returns None if the type is not supported. (e.g. for a
+/// rust library like libcargo.rlib, prefix = "lib", suffix = "rlib").
+///
+/// The caller needs to ensure that the lines object is at the correct line for the given crate
+/// type: this is not checked.
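+///
+/// For example, asking rustc for `--crate-name ___ --crate-type rlib` with
+/// `--print=file-names` yields a line like `lib___.rlib`, which this function
+/// splits on `___` into the pair `("lib", ".rlib")`.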
+// This function can not handle more than 1 file per type (with wasm32-unknown-emscripten, there
+// are 2 files for bin (.wasm and .js)).
+fn parse_crate_type(
+    crate_type: &str,
+    error: &str,
+    lines: &mut str::Lines,
+) -> CargoResult<Option<(String, String)>> {
+    let not_supported = error.lines().any(|line| {
+        (line.contains("unsupported crate type") || line.contains("unknown crate type"))
+            && line.contains(crate_type)
+    });
+    if not_supported {
+        return Ok(None);
+    }
+    let line = match lines.next() {
+        Some(line) => line,
+        None => bail!(
+            "malformed output when learning about \
+             crate-type {} information",
+            crate_type
+        ),
+    };
+    let mut parts = line.trim().split("___");
+    let prefix = parts.next().unwrap();
+    let suffix = match parts.next() {
+        Some(part) => part,
+        None => bail!(
+            "output of --print=file-names has changed in \
+             the compiler, cannot parse"
+        ),
+    };
+
+    Ok(Some((prefix.to_string(), suffix.to_string())))
+}
diff --git a/src/cargo/core/compiler/compilation.rs b/src/cargo/core/compiler/compilation.rs
new file mode 100644
index 000000000..e6900e64c
--- /dev/null
+++ b/src/cargo/core/compiler/compilation.rs
@@ -0,0 +1,219 @@
+use std::collections::{BTreeSet, HashMap, HashSet};
+use std::ffi::OsStr;
+use std::path::PathBuf;
+
+use semver::Version;
+use lazycell::LazyCell;
+
+use core::{Package, PackageId, Target, TargetKind};
+use util::{self, join_paths, process, CargoResult, Config, ProcessBuilder};
+use super::BuildContext;
+
+/// A structure returning the result of a compilation.
+pub struct Compilation<'cfg> {
+    /// A mapping from a package to the list of libraries that need to be
+    /// linked when working with that package.
+    pub libraries: HashMap<PackageId, HashSet<(Target, PathBuf)>>,
+
+    /// An array of all tests created during this compilation.
+    pub tests: Vec<(Package, TargetKind, String, PathBuf)>,
+
+    /// An array of all binaries created.
+    pub binaries: Vec<PathBuf>,
+
+    /// All directories for the output of native build commands.
+    ///
+    /// This is currently used to drive some entries which are added to the
+    /// LD_LIBRARY_PATH as appropriate.
+    ///
+    /// The order should be deterministic.
+    // TODO: deprecated, remove
+    pub native_dirs: BTreeSet<PathBuf>,
+
+    /// Root output directory (for the local package's artifacts)
+    pub root_output: PathBuf,
+
+    /// Output directory for rust dependencies.
+    /// May be for the host or for a specific target.
+    pub deps_output: PathBuf,
+
+    /// Output directory for the rust host dependencies.
+    pub host_deps_output: PathBuf,
+
+    /// The path to rustc's own libstd
+    pub host_dylib_path: Option<PathBuf>,
+
+    /// The path to libstd for the target
+    pub target_dylib_path: Option<PathBuf>,
+
+    /// Extra environment variables that were passed to compilations and should
+    /// be passed to future invocations of programs.
+    pub extra_env: HashMap<PackageId, Vec<(String, String)>>,
+
+    pub to_doc_test: Vec<Package>,
+
+    /// Features per package enabled during this compilation.
+    pub cfgs: HashMap<PackageId, HashSet<String>>,
+
+    /// Flags to pass to rustdoc when invoked from cargo test, per package.
+    pub rustdocflags: HashMap<PackageId, Vec<String>>,
+
+    pub host: String,
+    pub target: String,
+
+    config: &'cfg Config,
+    rustc_process: ProcessBuilder,
+
+    target_runner: LazyCell<Option<(PathBuf, Vec<String>)>>,
+}
+
+impl<'cfg> Compilation<'cfg> {
+    pub fn new<'a>(bcx: &BuildContext<'a, 'cfg>) -> Compilation<'cfg> {
+        Compilation {
+            libraries: HashMap::new(),
+            native_dirs: BTreeSet::new(), // TODO: deprecated, remove
+            root_output: PathBuf::from("/"),
+            deps_output: PathBuf::from("/"),
+            host_deps_output: PathBuf::from("/"),
+            host_dylib_path: bcx.host_info.sysroot_libdir.clone(),
+            target_dylib_path: bcx.target_info.sysroot_libdir.clone(),
+            tests: Vec::new(),
+            binaries: Vec::new(),
+            extra_env: HashMap::new(),
+            to_doc_test: Vec::new(),
+            cfgs: HashMap::new(),
+            rustdocflags: HashMap::new(),
+            config: bcx.config,
+            rustc_process: bcx.rustc.process(),
+            host: bcx.host_triple().to_string(),
+            target: bcx.target_triple().to_string(),
+            target_runner: LazyCell::new(),
+        }
+    }
+
+    /// See `process`.
+    pub fn rustc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
+        self.fill_env(self.rustc_process.clone(), pkg, true)
+    }
+
+    /// See `process`.
+    pub fn rustdoc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
+        self.fill_env(process(&*self.config.rustdoc()?), pkg, false)
+    }
+
+    /// See `process`.
+    pub fn host_process<T: AsRef<OsStr>>(
+        &self,
+        cmd: T,
+        pkg: &Package,
+    ) -> CargoResult<ProcessBuilder> {
+        self.fill_env(process(cmd), pkg, true)
+    }
+
+    fn target_runner(&self) -> CargoResult<&Option<(PathBuf, Vec<String>)>> {
+        self.target_runner.try_borrow_with(|| {
+            let key = format!("target.{}.runner", self.target);
+            Ok(self.config.get_path_and_args(&key)?.map(|v| v.val))
+        })
+    }
+
+    /// See `process`.
+    pub fn target_process<T: AsRef<OsStr>>(
+        &self,
+        cmd: T,
+        pkg: &Package,
+    ) -> CargoResult<ProcessBuilder> {
+        let builder = if let Some((ref runner, ref args)) = *self.target_runner()? {
+            let mut builder = process(runner);
+            builder.args(args);
+            builder.arg(cmd);
+            builder
+        } else {
+            process(cmd)
+        };
+        self.fill_env(builder, pkg, false)
+    }
+
+    /// Prepares a new process with an appropriate environment to run against
+    /// the artifacts produced by the build process.
+    ///
+    /// The package argument is also used to configure environment variables as
+    /// well as the working directory of the child process.
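+    ///
+    /// A sketch of the effect on Linux (where the dylib path variable is
+    /// LD_LIBRARY_PATH): a test binary run via `cargo test` gets
+    /// LD_LIBRARY_PATH extended with the dependency output directories
+    /// computed below, plus the `CARGO_PKG_*` variables describing the
+    /// package.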
+    fn fill_env(
+        &self,
+        mut cmd: ProcessBuilder,
+        pkg: &Package,
+        is_host: bool,
+    ) -> CargoResult<ProcessBuilder> {
+        let mut search_path = if is_host {
+            let mut search_path = vec![self.host_deps_output.clone()];
+            search_path.extend(self.host_dylib_path.clone());
+            search_path
+        } else {
+            let mut search_path =
+                super::filter_dynamic_search_path(self.native_dirs.iter(), &self.root_output);
+            search_path.push(self.root_output.clone());
+            search_path.push(self.deps_output.clone());
+            search_path.extend(self.target_dylib_path.clone());
+            search_path
+        };
+
+        search_path.extend(util::dylib_path().into_iter());
+        let search_path = join_paths(&search_path, util::dylib_path_envvar())?;
+
+        cmd.env(util::dylib_path_envvar(), &search_path);
+        if let Some(env) = self.extra_env.get(pkg.package_id()) {
+            for &(ref k, ref v) in env {
+                cmd.env(k, v);
+            }
+        }
+
+        let metadata = pkg.manifest().metadata();
+
+        let cargo_exe = self.config.cargo_exe()?;
+        cmd.env(::CARGO_ENV, cargo_exe);
+
+        // When adding new environment variables depending on
+        // crate properties which might require rebuild upon change
+        // consider adding the corresponding properties to the hash
+        // in BuildContext::target_metadata()
+        cmd.env("CARGO_MANIFEST_DIR", pkg.root())
+            .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string())
+            .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string())
+            .env("CARGO_PKG_VERSION_PATCH", &pkg.version().patch.to_string())
+            .env(
+                "CARGO_PKG_VERSION_PRE",
+                &pre_version_component(pkg.version()),
+            )
+            .env("CARGO_PKG_VERSION", &pkg.version().to_string())
+            .env("CARGO_PKG_NAME", &*pkg.name())
+            .env(
+                "CARGO_PKG_DESCRIPTION",
+                metadata.description.as_ref().unwrap_or(&String::new()),
+            )
+            .env(
+                "CARGO_PKG_HOMEPAGE",
+                metadata.homepage.as_ref().unwrap_or(&String::new()),
+            )
+            .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":"))
+            .cwd(pkg.root());
+        Ok(cmd)
+    }
+}
+
+fn pre_version_component(v: &Version) -> String {
+    if v.pre.is_empty() {
+        return String::new();
+    }
+
+    let mut ret = String::new();
+
+    for (i, x) in v.pre.iter().enumerate() {
+        if i != 0 {
+            ret.push('.')
+        };
+        ret.push_str(&x.to_string());
+    }
+
+    ret
+}
diff --git a/src/cargo/core/compiler/context/compilation_files.rs b/src/cargo/core/compiler/context/compilation_files.rs
new file mode 100644
index 000000000..e94a0aa4f
--- /dev/null
+++ b/src/cargo/core/compiler/context/compilation_files.rs
@@ -0,0 +1,454 @@
+use std::collections::HashMap;
+use std::env;
+use std::fmt;
+use std::hash::{Hash, Hasher, SipHasher};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use lazycell::LazyCell;
+
+use super::{BuildContext, Context, FileFlavor, Kind, Layout, Unit};
+use core::{TargetKind, Workspace};
+use util::{self, CargoResult};
+
+#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Metadata(u64);
+
+impl fmt::Display for Metadata {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:016x}", self.0)
+    }
+}
+
+pub struct CompilationFiles<'a, 'cfg: 'a> {
+    /// The target directory layout for the host (and target if it is the same as host)
+    pub(super) host: Layout,
+    /// The target directory layout for the target (if different from the host)
+    pub(super) target: Option<Layout>,
+    export_dir: Option<(PathBuf, Vec<Unit<'a>>)>,
+    ws: &'a Workspace<'cfg>,
+    metas: HashMap<Unit<'a>, Option<Metadata>>,
+    /// For each Unit, a list of all files produced.
+    outputs: HashMap<Unit<'a>, LazyCell<Arc<Vec<OutputFile>>>>,
+}
+
+#[derive(Debug)]
+pub struct OutputFile {
+    /// File name that will be produced by the build process (in `deps`).
diff --git a/src/cargo/core/compiler/context/compilation_files.rs b/src/cargo/core/compiler/context/compilation_files.rs
new file mode 100644
index 000000000..e94a0aa4f
--- /dev/null
+++ b/src/cargo/core/compiler/context/compilation_files.rs
@@ -0,0 +1,454 @@
+use std::collections::HashMap;
+use std::env;
+use std::fmt;
+use std::hash::{Hash, Hasher, SipHasher};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use lazycell::LazyCell;
+
+use super::{BuildContext, Context, FileFlavor, Kind, Layout, Unit};
+use core::{TargetKind, Workspace};
+use util::{self, CargoResult};
+
+#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)]
+pub struct Metadata(u64);
+
+impl fmt::Display for Metadata {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:016x}", self.0)
+    }
+}
+
+pub struct CompilationFiles<'a, 'cfg: 'a> {
+    /// The target directory layout for the host (and target if it is the same as host)
+    pub(super) host: Layout,
+    /// The target directory layout for the target (if different from the host)
+    pub(super) target: Option<Layout>,
+    export_dir: Option<(PathBuf, Vec<Unit<'a>>)>,
+    ws: &'a Workspace<'cfg>,
+    metas: HashMap<Unit<'a>, Option<Metadata>>,
+    /// For each Unit, a list of all files produced.
+    outputs: HashMap<Unit<'a>, LazyCell<Arc<Vec<OutputFile>>>>,
+}
+
+#[derive(Debug)]
+pub struct OutputFile {
+    /// File name that will be produced by the build process (in `deps`).
+    pub path: PathBuf,
+    /// If it should be linked into `target`, and what it should be called
+    /// (e.g. without metadata).
+    pub hardlink: Option<PathBuf>,
+    /// Type of the file (library / debug symbol / else).
+    pub flavor: FileFlavor,
+}
+
+impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> {
+    pub(super) fn new(
+        roots: &[Unit<'a>],
+        host: Layout,
+        target: Option<Layout>,
+        export_dir: Option<PathBuf>,
+        ws: &'a Workspace<'cfg>,
+        cx: &Context<'a, 'cfg>,
+    ) -> CompilationFiles<'a, 'cfg> {
+        let mut metas = HashMap::new();
+        for unit in roots {
+            metadata_of(unit, cx, &mut metas);
+        }
+        let outputs = metas
+            .keys()
+            .cloned()
+            .map(|unit| (unit, LazyCell::new()))
+            .collect();
+        CompilationFiles {
+            ws,
+            host,
+            target,
+            export_dir: export_dir.map(|dir| (dir, roots.to_vec())),
+            metas,
+            outputs,
+        }
+    }
+
+    /// Returns the appropriate directory layout for either a plugin or not.
+    pub fn layout(&self, kind: Kind) -> &Layout {
+        match kind {
+            Kind::Host => &self.host,
+            Kind::Target => self.target.as_ref().unwrap_or(&self.host),
+        }
+    }
+
+    /// Get the metadata for a target in a specific profile.
+    /// We build to the path: "{filename}-{target_metadata}".
+    /// We use a linking step to link/copy to a predictable filename
+    /// like `target/debug/libfoo.{a,so,rlib}` and such.
+    pub fn metadata(&self, unit: &Unit<'a>) -> Option<Metadata> {
+        self.metas[unit].clone()
+    }
+
+    /// Get the short hash based only on the PackageId.
+    /// Used for the metadata when target_metadata returns None.
+    pub fn target_short_hash(&self, unit: &Unit) -> String {
+        let hashable = unit.pkg.package_id().stable_hash(self.ws.root());
+        util::short_hash(&hashable)
+    }
+
+    /// Returns the appropriate output directory for the specified package and
+    /// target.
+    pub fn out_dir(&self, unit: &Unit<'a>) -> PathBuf {
+        if unit.mode.is_doc() {
+            self.layout(unit.kind).root().parent().unwrap().join("doc")
+        } else if unit.target.is_custom_build() {
+            self.build_script_dir(unit)
+        } else if unit.target.is_example() {
+            self.layout(unit.kind).examples().to_path_buf()
+        } else {
+            self.deps_dir(unit).to_path_buf()
+        }
+    }
+
+    pub fn export_dir(&self, unit: &Unit<'a>) -> Option<PathBuf> {
+        let &(ref dir, ref roots) = self.export_dir.as_ref()?;
+        if roots.contains(unit) {
+            Some(dir.clone())
+        } else {
+            None
+        }
+    }
+
+    pub fn pkg_dir(&self, unit: &Unit<'a>) -> String {
+        let name = unit.pkg.package_id().name();
+        match self.metas[unit] {
+            Some(ref meta) => format!("{}-{}", name, meta),
+            None => format!("{}-{}", name, self.target_short_hash(unit)),
+        }
+    }
+
+    /// Return the root of the build output tree.
+    pub fn target_root(&self) -> &Path {
+        self.host.dest()
+    }
+
+    pub fn host_deps(&self) -> &Path {
+        self.host.deps()
+    }
+
+    /// Returns the directories where Rust crate dependencies are found for the
+    /// specified unit.
+    pub fn deps_dir(&self, unit: &Unit) -> &Path {
+        self.layout(unit.kind).deps()
+    }
+
+    pub fn fingerprint_dir(&self, unit: &Unit<'a>) -> PathBuf {
+        let dir = self.pkg_dir(unit);
+        self.layout(unit.kind).fingerprint().join(dir)
+    }
+
+    /// Returns the directory where the compiled build script itself is stored.
+    pub fn build_script_dir(&self, unit: &Unit<'a>) -> PathBuf {
+        assert!(unit.target.is_custom_build());
+        assert!(!unit.mode.is_run_custom_build());
+        let dir = self.pkg_dir(unit);
+        self.layout(Kind::Host).build().join(dir)
+    }
+
+    /// Returns the directory where the output of a running build script is
+    /// stored (the parent of the script's `OUT_DIR`).
+    pub fn build_script_out_dir(&self, unit: &Unit<'a>) -> PathBuf {
+        assert!(unit.target.is_custom_build());
+        assert!(unit.mode.is_run_custom_build());
+        let dir = self.pkg_dir(unit);
+        self.layout(unit.kind).build().join(dir).join("out")
+    }
+
+    /// Returns the file stem for a given target/profile combo (with metadata).
+    pub fn file_stem(&self, unit: &Unit<'a>) -> String {
+        match self.metas[unit] {
+            Some(ref metadata) => format!("{}-{}", unit.target.crate_name(), metadata),
+            None => self.bin_stem(unit),
+        }
+    }
+
+    pub(super) fn outputs(
+        &self,
+        unit: &Unit<'a>,
+        bcx: &BuildContext<'a, 'cfg>,
+    ) -> CargoResult<Arc<Vec<OutputFile>>> {
+        self.outputs[unit]
+            .try_borrow_with(|| self.calc_outputs(unit, bcx))
+            .map(Arc::clone)
+    }
+
+    /// Returns the bin stem for a given target (without metadata).
+    fn bin_stem(&self, unit: &Unit) -> String {
+        if unit.target.allows_underscores() {
+            unit.target.name().to_string()
+        } else {
+            unit.target.crate_name()
+        }
+    }
+
+    /// Returns a tuple with the directory and name of the hard link we expect
+    /// our target to be copied to. E.g., file_stem may be out_dir/deps/foo-abcdef
+    /// and link_stem would be out_dir/foo.
+    /// This function returns it in two parts so the caller can add prefix/suffix
+    /// to filename separately.
+    ///
+    /// Returns an Option because in some cases we don't want to link
+    /// (e.g. a dependent lib).
+    fn link_stem(&self, unit: &Unit<'a>) -> Option<(PathBuf, String)> {
+        let out_dir = self.out_dir(unit);
+        let bin_stem = self.bin_stem(unit);
+        let file_stem = self.file_stem(unit);
+
+        // We currently only lift files up from the `deps` directory. If
+        // it was compiled into something like `example/` or `doc/` then
+        // we don't want to link it up.
+        if out_dir.ends_with("deps") {
+            // Don't lift up library dependencies
+            if self.ws.members().find(|&p| p == unit.pkg).is_none() && !unit.target.is_bin() {
+                None
+            } else {
+                Some((
+                    out_dir.parent().unwrap().to_owned(),
+                    if unit.mode.is_any_test() {
+                        file_stem
+                    } else {
+                        bin_stem
+                    },
+                ))
+            }
+        } else if bin_stem == file_stem {
+            None
+        } else if out_dir.ends_with("examples") || out_dir.parent().unwrap().ends_with("build") {
+            Some((out_dir, bin_stem))
+        } else {
+            None
+        }
+    }
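The pieces above compose into a simple naming scheme. In miniature (a standalone sketch with a hypothetical helper; the hash is the `Metadata` component computed later in this file):

    /// rustc writes `deps/libfoo-<metadata>.rlib`; Cargo hardlinks the result
    /// to the predictable `libfoo.rlib` one directory up.
    fn deps_and_link_names(stem: &str, metadata: &str, ext: &str) -> (String, String) {
        let deps_name = format!("lib{}-{}.{}", stem, metadata, ext);
        let link_name = format!("lib{}.{}", stem, ext);
        (deps_name, link_name)
    }

    // deps_and_link_names("foo", "0a1b2c3d4e5f6a7b", "rlib")
    //     => ("libfoo-0a1b2c3d4e5f6a7b.rlib", "libfoo.rlib")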
+
+    fn calc_outputs(
+        &self,
+        unit: &Unit<'a>,
+        bcx: &BuildContext<'a, 'cfg>,
+    ) -> CargoResult<Arc<Vec<OutputFile>>> {
+        let out_dir = self.out_dir(unit);
+        let file_stem = self.file_stem(unit);
+        let link_stem = self.link_stem(unit);
+        let info = if unit.target.for_host() {
+            &bcx.host_info
+        } else {
+            &bcx.target_info
+        };
+
+        let mut ret = Vec::new();
+        let mut unsupported = Vec::new();
+        {
+            if unit.mode.is_check() {
+                // This is not quite correct for non-lib targets. rustc
+                // currently does not emit rmeta files, so there is nothing to
+                // check for! See #3624.
+ let path = out_dir.join(format!("lib{}.rmeta", file_stem)); + let hardlink = link_stem + .clone() + .map(|(ld, ls)| ld.join(format!("lib{}.rmeta", ls))); + ret.push(OutputFile { + path, + hardlink, + flavor: FileFlavor::Linkable, + }); + } else { + let mut add = |crate_type: &str, flavor: FileFlavor| -> CargoResult<()> { + let crate_type = if crate_type == "lib" { + "rlib" + } else { + crate_type + }; + let file_types = info.file_types( + crate_type, + flavor, + unit.target.kind(), + bcx.target_triple(), + )?; + + match file_types { + Some(types) => for file_type in types { + let path = out_dir.join(file_type.filename(&file_stem)); + let hardlink = link_stem + .as_ref() + .map(|&(ref ld, ref ls)| ld.join(file_type.filename(ls))); + ret.push(OutputFile { + path, + hardlink, + flavor: file_type.flavor, + }); + }, + // not supported, don't worry about it + None => { + unsupported.push(crate_type.to_string()); + } + } + Ok(()) + }; + //info!("{:?}", unit); + match *unit.target.kind() { + TargetKind::Bin + | TargetKind::CustomBuild + | TargetKind::ExampleBin + | TargetKind::Bench + | TargetKind::Test => { + add("bin", FileFlavor::Normal)?; + } + TargetKind::Lib(..) | TargetKind::ExampleLib(..) if unit.mode.is_any_test() => { + add("bin", FileFlavor::Normal)?; + } + TargetKind::ExampleLib(ref kinds) | TargetKind::Lib(ref kinds) => { + for kind in kinds { + add( + kind.crate_type(), + if kind.linkable() { + FileFlavor::Linkable + } else { + FileFlavor::Normal + }, + )?; + } + } + } + } + } + if ret.is_empty() { + if !unsupported.is_empty() { + bail!( + "cannot produce {} for `{}` as the target `{}` \ + does not support these crate types", + unsupported.join(", "), + unit.pkg, + bcx.target_triple() + ) + } + bail!( + "cannot compile `{}` as the target `{}` does not \ + support any of the output crate types", + unit.pkg, + bcx.target_triple() + ); + } + info!("Target filenames: {:?}", ret); + + Ok(Arc::new(ret)) + } +} + +fn metadata_of<'a, 'cfg>( + unit: &Unit<'a>, + cx: &Context<'a, 'cfg>, + metas: &mut HashMap, Option>, +) -> Option { + if !metas.contains_key(unit) { + let meta = compute_metadata(unit, cx, metas); + metas.insert(*unit, meta); + for unit in cx.dep_targets(unit) { + metadata_of(&unit, cx, metas); + } + } + metas[unit].clone() +} + +fn compute_metadata<'a, 'cfg>( + unit: &Unit<'a>, + cx: &Context<'a, 'cfg>, + metas: &mut HashMap, Option>, +) -> Option { + // No metadata for dylibs because of a couple issues + // - OSX encodes the dylib name in the executable + // - Windows rustc multiple files of which we can't easily link all of them + // + // No metadata for bin because of an issue + // - wasm32 rustc/emcc encodes the .wasm name in the .js (rust-lang/cargo#4535) + // + // Two exceptions + // 1) Upstream dependencies (we aren't exporting + need to resolve name conflict) + // 2) __CARGO_DEFAULT_LIB_METADATA env var + // + // Note, though, that the compiler's build system at least wants + // path dependencies (eg libstd) to have hashes in filenames. To account for + // that we have an extra hack here which reads the + // `__CARGO_DEFAULT_LIB_METADATA` environment variable and creates a + // hash in the filename if that's present. + // + // This environment variable should not be relied on! It's + // just here for rustbuild. We need a more principled method + // doing this eventually. 
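Before the body of `compute_metadata`: the whole scheme reduces to hashing a collection of inputs with `SipHasher` and rendering sixteen hex digits. A standalone sketch under that reading (the deprecated `std::hash::SipHasher` mirrors the import at the top of this file):

    /// Collapse fingerprint inputs into the 16-hex-digit `Metadata` component,
    /// formatted exactly like `impl fmt::Display for Metadata` above.
    #[allow(deprecated)] // std's SipHasher is deprecated, but it is what this file uses
    fn metadata_for(inputs: &[&str]) -> String {
        use std::hash::{Hash, Hasher, SipHasher};

        let mut hasher = SipHasher::new_with_keys(0, 0);
        for input in inputs {
            input.hash(&mut hasher);
        }
        format!("{:016x}", hasher.finish())
    }

    // metadata_for(&["pkgid", "features", "profile", "kind", "target"]) yields
    // the stable `-<hash>` suffix seen on files in target/debug/deps.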
+ let bcx = &cx.bcx; + let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA"); + if !(unit.mode.is_any_test() || unit.mode.is_check()) + && (unit.target.is_dylib() || unit.target.is_cdylib() + || (unit.target.is_bin() && bcx.target_triple().starts_with("wasm32-"))) + && unit.pkg.package_id().source_id().is_path() + && __cargo_default_lib_metadata.is_err() + { + return None; + } + + let mut hasher = SipHasher::new_with_keys(0, 0); + + // Unique metadata per (name, source, version) triple. This'll allow us + // to pull crates from anywhere w/o worrying about conflicts + unit.pkg + .package_id() + .stable_hash(bcx.ws.root()) + .hash(&mut hasher); + + // Add package properties which map to environment variables + // exposed by Cargo + let manifest_metadata = unit.pkg.manifest().metadata(); + manifest_metadata.authors.hash(&mut hasher); + manifest_metadata.description.hash(&mut hasher); + manifest_metadata.homepage.hash(&mut hasher); + + // Also mix in enabled features to our metadata. This'll ensure that + // when changing feature sets each lib is separately cached. + bcx.resolve + .features_sorted(unit.pkg.package_id()) + .hash(&mut hasher); + + // Mix in the target-metadata of all the dependencies of this target + { + let mut deps_metadata = cx.dep_targets(unit) + .iter() + .map(|dep| metadata_of(dep, cx, metas)) + .collect::>(); + deps_metadata.sort(); + deps_metadata.hash(&mut hasher); + } + + // Throw in the profile we're compiling with. This helps caching + // panic=abort and panic=unwind artifacts, additionally with various + // settings like debuginfo and whatnot. + unit.profile.hash(&mut hasher); + cx.used_in_plugin.contains(unit).hash(&mut hasher); + unit.mode.hash(&mut hasher); + if let Some(ref args) = bcx.extra_args_for(unit) { + args.hash(&mut hasher); + } + + // Artifacts compiled for the host should have a different metadata + // piece than those compiled for the target, so make sure we throw in + // the unit's `kind` as well + unit.kind.hash(&mut hasher); + + // Finally throw in the target name/kind. This ensures that concurrent + // compiles of targets in the same crate don't collide. + unit.target.name().hash(&mut hasher); + unit.target.kind().hash(&mut hasher); + + bcx.rustc.verbose_version.hash(&mut hasher); + + // Seed the contents of __CARGO_DEFAULT_LIB_METADATA to the hasher if present. + // This should be the release channel, to get a different hash for each channel. 
+ if let Ok(ref channel) = __cargo_default_lib_metadata { + channel.hash(&mut hasher); + } + Some(Metadata(hasher.finish())) +} diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs new file mode 100644 index 000000000..af04750a8 --- /dev/null +++ b/src/cargo/core/compiler/context/mod.rs @@ -0,0 +1,491 @@ +#![allow(deprecated)] +use std::collections::{HashMap, HashSet}; +use std::fmt::Write; +use std::path::PathBuf; +use std::sync::Arc; + +use jobserver::Client; + +use core::{Package, PackageId, Resolve, Target}; +use core::profiles::Profile; +use util::errors::{CargoResult, CargoResultExt}; +use util::{internal, profile, Config}; + +use super::custom_build::{self, BuildDeps, BuildScripts, BuildState}; +use super::fingerprint::Fingerprint; +use super::job_queue::JobQueue; +use super::layout::Layout; +use super::{BuildContext, Compilation, CompileMode, Executor, FileFlavor, Kind}; + +mod unit_dependencies; +use self::unit_dependencies::build_unit_dependencies; + +mod compilation_files; +pub use self::compilation_files::Metadata; +use self::compilation_files::{CompilationFiles, OutputFile}; + +/// All information needed to define a Unit. +/// +/// A unit is an object that has enough information so that cargo knows how to build it. +/// For example, if your project has dependencies, then every dependency will be built as a library +/// unit. If your project is a library, then it will be built as a library unit as well, or if it +/// is a binary with `main.rs`, then a binary will be output. There are also separate unit types +/// for `test`ing and `check`ing, amongst others. +/// +/// The unit also holds information about all possible metadata about the package in `pkg`. +/// +/// A unit needs to know extra information in addition to the type and root source file. For +/// example, it needs to know the target architecture (OS, chip arch etc.) and it needs to know +/// whether you want a debug or release build. There is enough information in this struct to figure +/// all that out. +#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)] +pub struct Unit<'a> { + /// Information about available targets, which files to include/exclude, etc. Basically stuff in + /// `Cargo.toml`. + pub pkg: &'a Package, + /// Information about the specific target to build, out of the possible targets in `pkg`. Not + /// to be confused with *target-triple* (or *target architecture* ...), the target arch for a + /// build. + pub target: &'a Target, + /// The profile contains information about *how* the build should be run, including debug + /// level, etc. + pub profile: Profile, + /// Whether this compilation unit is for the host or target architecture. + /// + /// For example, when + /// cross compiling and using a custom build script, the build script needs to be compiled for + /// the host architecture so the host rustc can use it (when compiling to the target + /// architecture). + pub kind: Kind, + /// The "mode" this unit is being compiled for. See `CompileMode` for + /// more details. 
+    pub mode: CompileMode,
+}
+
+pub struct Context<'a, 'cfg: 'a> {
+    pub bcx: &'a BuildContext<'a, 'cfg>,
+    pub compilation: Compilation<'cfg>,
+    pub build_state: Arc<BuildState>,
+    pub build_script_overridden: HashSet<(PackageId, Kind)>,
+    pub build_explicit_deps: HashMap<Unit<'a>, BuildDeps>,
+    pub fingerprints: HashMap<Unit<'a>, Arc<Fingerprint>>,
+    pub compiled: HashSet<Unit<'a>>,
+    pub build_scripts: HashMap<Unit<'a>, Arc<BuildScripts>>,
+    pub links: Links<'a>,
+    pub used_in_plugin: HashSet<Unit<'a>>,
+    pub jobserver: Client,
+    unit_dependencies: HashMap<Unit<'a>, Vec<Unit<'a>>>,
+    files: Option<CompilationFiles<'a, 'cfg>>,
+}
+
+impl<'a, 'cfg> Context<'a, 'cfg> {
+    pub fn new(config: &'cfg Config, bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult<Self> {
+        // Load up the jobserver that we'll use to manage our parallelism. This
+        // is the same as the GNU make implementation of a jobserver, and
+        // intentionally so! It's hoped that we can interact with GNU make and
+        // all share the same jobserver.
+        //
+        // Note that if we don't have a jobserver in our environment then we
+        // create our own, and we create it with `n-1` tokens because one token
+        // is ourself, a running process.
+        let jobserver = match config.jobserver_from_env() {
+            Some(c) => c.clone(),
+            None => Client::new(bcx.build_config.jobs as usize - 1)
+                .chain_err(|| "failed to create jobserver")?,
+        };
+
+        Ok(Self {
+            bcx,
+            compilation: Compilation::new(bcx),
+            build_state: Arc::new(BuildState::new(&bcx.host_config, &bcx.target_config)),
+            fingerprints: HashMap::new(),
+            compiled: HashSet::new(),
+            build_scripts: HashMap::new(),
+            build_explicit_deps: HashMap::new(),
+            links: Links::new(),
+            used_in_plugin: HashSet::new(),
+            jobserver,
+            build_script_overridden: HashSet::new(),
+
+            unit_dependencies: HashMap::new(),
+            files: None,
+        })
+    }
+
+    // Returns a mapping of the root package plus its immediate dependencies to
+    // where the compiled libraries are all located.
+    pub fn compile(
+        mut self,
+        units: &[Unit<'a>],
+        export_dir: Option<PathBuf>,
+        exec: &Arc<Executor>,
+    ) -> CargoResult<Compilation<'cfg>> {
+        let mut queue = JobQueue::new(self.bcx);
+        self.prepare_units(export_dir, units)?;
+        self.prepare()?;
+        custom_build::build_map(&mut self, units)?;
+
+        for unit in units.iter() {
+            // Build up a list of pending jobs, each of which represent
+            // compiling a particular package. No actual work is executed as
+            // part of this, that's all done next as part of the `execute`
+            // function which will run everything in order with proper
+            // parallelism.
+            super::compile(&mut self, &mut queue, unit, exec)?;
+        }
+
+        // Now that we've figured out everything that we're going to do, do it!
+ queue.execute(&mut self)?; + + for unit in units.iter() { + for output in self.outputs(unit)?.iter() { + if output.flavor == FileFlavor::DebugInfo { + continue; + } + + let bindst = match output.hardlink { + Some(ref link_dst) => link_dst, + None => &output.path, + }; + + if unit.mode.is_any_test() && !unit.mode.is_check() { + self.compilation.tests.push(( + unit.pkg.clone(), + unit.target.kind().clone(), + unit.target.name().to_string(), + output.path.clone(), + )); + } else if unit.target.is_bin() || unit.target.is_bin_example() { + self.compilation.binaries.push(bindst.clone()); + } else if unit.target.is_lib() { + let pkgid = unit.pkg.package_id().clone(); + self.compilation + .libraries + .entry(pkgid) + .or_insert_with(HashSet::new) + .insert((unit.target.clone(), output.path.clone())); + } + } + + for dep in self.dep_targets(unit).iter() { + if !unit.target.is_lib() { + continue; + } + + if dep.mode.is_run_custom_build() { + let out_dir = self.files().build_script_out_dir(dep).display().to_string(); + self.compilation + .extra_env + .entry(dep.pkg.package_id().clone()) + .or_insert_with(Vec::new) + .push(("OUT_DIR".to_string(), out_dir)); + } + + if !dep.target.is_lib() { + continue; + } + if dep.mode.is_doc() { + continue; + } + + let outputs = self.outputs(dep)?; + self.compilation + .libraries + .entry(unit.pkg.package_id().clone()) + .or_insert_with(HashSet::new) + .extend( + outputs + .iter() + .map(|output| (dep.target.clone(), output.path.clone())), + ); + } + + let feats = self.bcx.resolve.features(unit.pkg.package_id()); + if !feats.is_empty() { + self.compilation + .cfgs + .entry(unit.pkg.package_id().clone()) + .or_insert_with(|| { + feats + .iter() + .map(|feat| format!("feature=\"{}\"", feat)) + .collect() + }); + } + let rustdocflags = self.bcx.rustdocflags_args(unit)?; + if !rustdocflags.is_empty() { + self.compilation + .rustdocflags + .entry(unit.pkg.package_id().clone()) + .or_insert(rustdocflags); + } + + super::output_depinfo(&mut self, unit)?; + } + + for (&(ref pkg, _), output) in self.build_state.outputs.lock().unwrap().iter() { + self.compilation + .cfgs + .entry(pkg.clone()) + .or_insert_with(HashSet::new) + .extend(output.cfgs.iter().cloned()); + + self.compilation + .extra_env + .entry(pkg.clone()) + .or_insert_with(Vec::new) + .extend(output.env.iter().cloned()); + + for dir in output.library_paths.iter() { + self.compilation.native_dirs.insert(dir.clone()); + } + } + Ok(self.compilation) + } + + pub fn prepare_units( + &mut self, + export_dir: Option, + units: &[Unit<'a>], + ) -> CargoResult<()> { + let dest = if self.bcx.build_config.release { + "release" + } else { + "debug" + }; + let host_layout = Layout::new(self.bcx.ws, None, dest)?; + let target_layout = match self.bcx.build_config.requested_target.as_ref() { + Some(target) => Some(Layout::new(self.bcx.ws, Some(target), dest)?), + None => None, + }; + + build_unit_dependencies(units, self.bcx, &mut self.unit_dependencies)?; + self.build_used_in_plugin_map(units)?; + let files = CompilationFiles::new( + units, + host_layout, + target_layout, + export_dir, + self.bcx.ws, + self, + ); + self.files = Some(files); + Ok(()) + } + + /// Prepare this context, ensuring that all filesystem directories are in + /// place. 
+ pub fn prepare(&mut self) -> CargoResult<()> { + let _p = profile::start("preparing layout"); + + self.files_mut() + .host + .prepare() + .chain_err(|| internal("couldn't prepare build directories"))?; + if let Some(ref mut target) = self.files.as_mut().unwrap().target { + target + .prepare() + .chain_err(|| internal("couldn't prepare build directories"))?; + } + + self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf(); + + let files = self.files.as_ref().unwrap(); + let layout = files.target.as_ref().unwrap_or(&files.host); + self.compilation.root_output = layout.dest().to_path_buf(); + self.compilation.deps_output = layout.deps().to_path_buf(); + Ok(()) + } + + /// Builds up the `used_in_plugin` internal to this context from the list of + /// top-level units. + /// + /// This will recursively walk `units` and all of their dependencies to + /// determine which crate are going to be used in plugins or not. + fn build_used_in_plugin_map(&mut self, units: &[Unit<'a>]) -> CargoResult<()> { + let mut visited = HashSet::new(); + for unit in units { + self.walk_used_in_plugin_map(unit, unit.target.for_host(), &mut visited)?; + } + Ok(()) + } + + fn walk_used_in_plugin_map( + &mut self, + unit: &Unit<'a>, + is_plugin: bool, + visited: &mut HashSet<(Unit<'a>, bool)>, + ) -> CargoResult<()> { + if !visited.insert((*unit, is_plugin)) { + return Ok(()); + } + if is_plugin { + self.used_in_plugin.insert(*unit); + } + for unit in self.dep_targets(unit) { + self.walk_used_in_plugin_map(&unit, is_plugin || unit.target.for_host(), visited)?; + } + Ok(()) + } + + pub fn files(&self) -> &CompilationFiles<'a, 'cfg> { + self.files.as_ref().unwrap() + } + + fn files_mut(&mut self) -> &mut CompilationFiles<'a, 'cfg> { + self.files.as_mut().unwrap() + } + + /// Return the filenames that the given target for the given profile will + /// generate as a list of 3-tuples (filename, link_dst, linkable) + /// + /// - filename: filename rustc compiles to. (Often has metadata suffix). + /// - link_dst: Optional file to link/copy the result to (without metadata suffix) + /// - linkable: Whether possible to link against file (eg it's a library) + pub fn outputs(&mut self, unit: &Unit<'a>) -> CargoResult>> { + self.files.as_ref().unwrap().outputs(unit, self.bcx) + } + + /// For a package, return all targets which are registered as dependencies + /// for that package. + // TODO: this ideally should be `-> &[Unit<'a>]` + pub fn dep_targets(&self, unit: &Unit<'a>) -> Vec> { + // If this build script's execution has been overridden then we don't + // actually depend on anything, we've reached the end of the dependency + // chain as we've got all the info we're gonna get. + // + // Note there's a subtlety about this piece of code! The + // `build_script_overridden` map here is populated in + // `custom_build::build_map` which you need to call before inspecting + // dependencies. However, that code itself calls this method and + // gets a full pre-filtered set of dependencies. This is not super + // obvious, and clear, but it does work at the moment. + if unit.target.is_custom_build() { + let key = (unit.pkg.package_id().clone(), unit.kind); + if self.build_script_overridden.contains(&key) { + return Vec::new(); + } + } + self.unit_dependencies[unit].clone() + } + + pub fn incremental_args(&self, unit: &Unit) -> CargoResult> { + // There's a number of ways to configure incremental compilation right + // now. 
In order of descending priority (first is highest priority) we + // have: + // + // * `CARGO_INCREMENTAL` - this is blanket used unconditionally to turn + // on/off incremental compilation for any cargo subcommand. We'll + // respect this if set. + // * `build.incremental` - in `.cargo/config` this blanket key can + // globally for a system configure whether incremental compilation is + // enabled. Note that setting this to `true` will not actually affect + // all builds though. For example a `true` value doesn't enable + // release incremental builds, only dev incremental builds. This can + // be useful to globally disable incremental compilation like + // `CARGO_INCREMENTAL`. + // * `profile.dev.incremental` - in `Cargo.toml` specific profiles can + // be configured to enable/disable incremental compilation. This can + // be primarily used to disable incremental when buggy for a project. + // * Finally, each profile has a default for whether it will enable + // incremental compilation or not. Primarily development profiles + // have it enabled by default while release profiles have it disabled + // by default. + let global_cfg = self.bcx + .config + .get_bool("build.incremental")? + .map(|c| c.val); + let incremental = match ( + self.bcx.incremental_env, + global_cfg, + unit.profile.incremental, + ) { + (Some(v), _, _) => v, + (None, Some(false), _) => false, + (None, _, other) => other, + }; + + if !incremental { + return Ok(Vec::new()); + } + + // Only enable incremental compilation for sources the user can + // modify (aka path sources). For things that change infrequently, + // non-incremental builds yield better performance in the compiler + // itself (aka crates.io / git dependencies) + // + // (see also https://github.com/rust-lang/cargo/issues/3972) + if !unit.pkg.package_id().source_id().is_path() { + return Ok(Vec::new()); + } + + let dir = self.files().layout(unit.kind).incremental().display(); + Ok(vec!["-C".to_string(), format!("incremental={}", dir)]) + } +} + +#[derive(Default)] +pub struct Links<'a> { + validated: HashSet<&'a PackageId>, + links: HashMap, +} + +impl<'a> Links<'a> { + pub fn new() -> Links<'a> { + Links { + validated: HashSet::new(), + links: HashMap::new(), + } + } + + pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'a>) -> CargoResult<()> { + if !self.validated.insert(unit.pkg.package_id()) { + return Ok(()); + } + let lib = match unit.pkg.manifest().links() { + Some(lib) => lib, + None => return Ok(()), + }; + if let Some(prev) = self.links.get(lib) { + let pkg = unit.pkg.package_id(); + + let describe_path = |pkgid: &PackageId| -> String { + let dep_path = resolve.path_to_top(pkgid); + let mut dep_path_desc = format!("package `{}`", dep_path[0]); + for dep in dep_path.iter().skip(1) { + write!(dep_path_desc, "\n ... 
which is depended on by `{}`", dep).unwrap(); + } + dep_path_desc + }; + + bail!( + "multiple packages link to native library `{}`, \ + but a native library can be linked only once\n\ + \n\ + {}\nlinks to native library `{}`\n\ + \n\ + {}\nalso links to native library `{}`", + lib, + describe_path(prev), + lib, + describe_path(pkg), + lib + ) + } + if !unit.pkg + .manifest() + .targets() + .iter() + .any(|t| t.is_custom_build()) + { + bail!( + "package `{}` specifies that it links to `{}` but does not \ + have a custom build script", + unit.pkg.package_id(), + lib + ) + } + self.links.insert(lib.to_string(), unit.pkg.package_id()); + Ok(()) + } +} diff --git a/src/cargo/core/compiler/context/unit_dependencies.rs b/src/cargo/core/compiler/context/unit_dependencies.rs new file mode 100644 index 000000000..924744651 --- /dev/null +++ b/src/cargo/core/compiler/context/unit_dependencies.rs @@ -0,0 +1,385 @@ +//! Constructs the dependency graph for compilation. +//! +//! Rust code is typically organized as a set of Cargo packages. The +//! dependencies between the packages themselves are stored in the +//! `Resolve` struct. However, we can't use that information as is for +//! compilation! A package typically contains several targets, or crates, +//! and these targets has inter-dependencies. For example, you need to +//! compile the `lib` target before the `bin` one, and you need to compile +//! `build.rs` before either of those. +//! +//! So, we need to lower the `Resolve`, which specifies dependencies between +//! *packages*, to a graph of dependencies between their *targets*, and this +//! is exactly what this module is doing! Well, almost exactly: another +//! complication is that we might want to compile the same target several times +//! (for example, with and without tests), so we actually build a dependency +//! graph of `Unit`s, which capture these properties. + +use super::{BuildContext, CompileMode, Kind, Unit}; +use core::dependency::Kind as DepKind; +use core::profiles::ProfileFor; +use core::{Package, Target}; +use std::collections::HashMap; +use CargoResult; + +pub fn build_unit_dependencies<'a, 'cfg>( + roots: &[Unit<'a>], + bcx: &BuildContext<'a, 'cfg>, + mut deps: &mut HashMap, Vec>>, +) -> CargoResult<()> { + for unit in roots.iter() { + // Dependencies of tests/benches should not have `panic` set. + // We check the global test mode to see if we are running in `cargo + // test` in which case we ensure all dependencies have `panic` + // cleared, and avoid building the lib thrice (once with `panic`, once + // without, once for --test). In particular, the lib included for + // doctests and examples are `Build` mode here. + let profile_for = if unit.mode.is_any_test() || bcx.build_config.test() { + ProfileFor::TestDependency + } else { + ProfileFor::Any + }; + deps_of(unit, bcx, &mut deps, profile_for)?; + } + + Ok(()) +} + +fn deps_of<'a, 'b, 'cfg>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, + deps: &'b mut HashMap, Vec>>, + profile_for: ProfileFor, +) -> CargoResult<&'b [Unit<'a>]> { + // Currently the `deps` map does not include `profile_for`. This should + // be safe for now. `TestDependency` only exists to clear the `panic` + // flag, and you'll never ask for a `unit` with `panic` set as a + // `TestDependency`. `CustomBuild` should also be fine since if the + // requested unit's settings are the same as `Any`, `CustomBuild` can't + // affect anything else in the hierarchy. 
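The memoization the comment above describes is worth seeing in isolation before the real implementation below. A toy sketch, with `String` keys standing in for `Unit`s:

    use std::collections::HashMap;

    /// Compute each node's dependency list once; recurse into children only on
    /// the first visit. `deps_of` below follows the same shape with `Unit` keys.
    fn toy_deps_of<'m>(
        node: &str,
        graph: &HashMap<String, Vec<String>>,
        memo: &'m mut HashMap<String, Vec<String>>,
    ) -> &'m [String] {
        if !memo.contains_key(node) {
            let children = graph.get(node).cloned().unwrap_or_default();
            memo.insert(node.to_string(), children.clone());
            for child in &children {
                toy_deps_of(child, graph, memo);
            }
        }
        &memo[node]
    }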
+ if !deps.contains_key(unit) { + let unit_deps = compute_deps(unit, bcx, deps, profile_for)?; + let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect(); + deps.insert(*unit, to_insert); + for (unit, profile_for) in unit_deps { + deps_of(&unit, bcx, deps, profile_for)?; + } + } + Ok(deps[unit].as_ref()) +} + +/// For a package, return all targets which are registered as dependencies +/// for that package. +/// This returns a vec of `(Unit, ProfileFor)` pairs. The `ProfileFor` +/// is the profile type that should be used for dependencies of the unit. +fn compute_deps<'a, 'b, 'cfg>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, + deps: &'b mut HashMap, Vec>>, + profile_for: ProfileFor, +) -> CargoResult, ProfileFor)>> { + if unit.mode.is_run_custom_build() { + return compute_deps_custom_build(unit, bcx, deps); + } else if unit.mode.is_doc() && !unit.mode.is_any_test() { + // Note: This does not include Doctest. + return compute_deps_doc(unit, bcx); + } + + let id = unit.pkg.package_id(); + let deps = bcx.resolve.deps(id); + let mut ret = deps.filter(|&(_id, deps)| { + assert!(deps.len() > 0); + deps.iter().any(|dep| { + // If this target is a build command, then we only want build + // dependencies, otherwise we want everything *other than* build + // dependencies. + if unit.target.is_custom_build() != dep.is_build() { + return false; + } + + // If this dependency is *not* a transitive dependency, then it + // only applies to test/example targets + if !dep.is_transitive() && !unit.target.is_test() && !unit.target.is_example() + && !unit.mode.is_any_test() + { + return false; + } + + // If this dependency is only available for certain platforms, + // make sure we're only enabling it for that platform. + if !bcx.dep_platform_activated(dep, unit.kind) { + return false; + } + + // If the dependency is optional, then we're only activating it + // if the corresponding feature was activated + if dep.is_optional() && !bcx.resolve.features(id).contains(&*dep.name()) { + return false; + } + + // If we've gotten past all that, then this dependency is + // actually used! + true + }) + }).filter_map(|(id, _)| match bcx.get_package(id) { + Ok(pkg) => pkg.targets().iter().find(|t| t.is_lib()).map(|t| { + let mode = check_or_build_mode(&unit.mode, t); + let unit = new_unit(bcx, pkg, t, profile_for, unit.kind.for_target(t), mode); + Ok((unit, profile_for)) + }), + Err(e) => Some(Err(e)), + }) + .collect::>>()?; + + // If this target is a build script, then what we've collected so far is + // all we need. If this isn't a build script, then it depends on the + // build script if there is one. + if unit.target.is_custom_build() { + return Ok(ret); + } + ret.extend(dep_build_script(unit, bcx)); + + // If this target is a binary, test, example, etc, then it depends on + // the library of the same package. The call to `resolve.deps` above + // didn't include `pkg` in the return values, so we need to special case + // it here and see if we need to push `(pkg, pkg_lib_target)`. + if unit.target.is_lib() && unit.mode != CompileMode::Doctest { + return Ok(ret); + } + ret.extend(maybe_lib(unit, bcx, profile_for)); + + // If any integration tests/benches are being run, make sure that + // binaries are built as well. 
+ if !unit.mode.is_check() && unit.mode.is_any_test() + && (unit.target.is_test() || unit.target.is_bench()) + { + ret.extend( + unit.pkg + .targets() + .iter() + .filter(|t| { + let no_required_features = Vec::new(); + + t.is_bin() && + // Skip binaries with required features that have not been selected. + t.required_features().unwrap_or(&no_required_features).iter().all(|f| { + bcx.resolve.features(id).contains(f) + }) + }) + .map(|t| { + ( + // TODO: Should not be using profile_for here. Should + // instead use ProfileFor::Any so that bins are built + // with panic, but this aggravates + // https://github.com/rust-lang/cargo/issues/5444 + // Switching it will fix + // https://github.com/rust-lang/cargo/issues/5435 + new_unit( + bcx, + unit.pkg, + t, + profile_for, + unit.kind.for_target(t), + CompileMode::Build, + ), + profile_for, + ) + }), + ); + } + + Ok(ret) +} + +/// Returns the dependencies needed to run a build script. +/// +/// The `unit` provided must represent an execution of a build script, and +/// the returned set of units must all be run before `unit` is run. +fn compute_deps_custom_build<'a, 'cfg>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, + deps: &mut HashMap, Vec>>, +) -> CargoResult, ProfileFor)>> { + // When not overridden, then the dependencies to run a build script are: + // + // 1. Compiling the build script itself + // 2. For each immediate dependency of our package which has a `links` + // key, the execution of that build script. + let not_custom_build = unit.pkg + .targets() + .iter() + .find(|t| !t.is_custom_build()) + .unwrap(); + let tmp = Unit { + pkg: unit.pkg, + target: not_custom_build, + profile: unit.profile, + kind: unit.kind, + mode: CompileMode::Build, + }; + let deps = deps_of(&tmp, bcx, deps, ProfileFor::Any)?; + Ok(deps.iter() + .filter_map(|unit| { + if !unit.target.linkable() || unit.pkg.manifest().links().is_none() { + return None; + } + dep_build_script(unit, bcx) + }) + .chain(Some(( + new_unit( + bcx, + unit.pkg, + unit.target, + ProfileFor::CustomBuild, + Kind::Host, // build scripts always compiled for the host + CompileMode::Build, + ), + // All dependencies of this unit should use profiles for custom + // builds. + ProfileFor::CustomBuild, + ))) + .collect()) +} + +/// Returns the dependencies necessary to document a package +fn compute_deps_doc<'a, 'cfg>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, +) -> CargoResult, ProfileFor)>> { + let deps = bcx.resolve + .deps(unit.pkg.package_id()) + .filter(|&(_id, deps)| { + deps.iter().any(|dep| match dep.kind() { + DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind), + _ => false, + }) + }) + .map(|(id, _deps)| bcx.get_package(id)); + + // To document a library, we depend on dependencies actually being + // built. If we're documenting *all* libraries, then we also depend on + // the documentation of the library being built. + let mut ret = Vec::new(); + for dep in deps { + let dep = dep?; + let lib = match dep.targets().iter().find(|t| t.is_lib()) { + Some(lib) => lib, + None => continue, + }; + // rustdoc only needs rmeta files for regular dependencies. + // However, for plugins/proc-macros, deps should be built like normal. + let mode = check_or_build_mode(&unit.mode, lib); + let lib_unit = new_unit( + bcx, + dep, + lib, + ProfileFor::Any, + unit.kind.for_target(lib), + mode, + ); + ret.push((lib_unit, ProfileFor::Any)); + if let CompileMode::Doc { deps: true } = unit.mode { + // Document this lib as well. 
+ let doc_unit = new_unit( + bcx, + dep, + lib, + ProfileFor::Any, + unit.kind.for_target(lib), + unit.mode, + ); + ret.push((doc_unit, ProfileFor::Any)); + } + } + + // Be sure to build/run the build script for documented libraries as + ret.extend(dep_build_script(unit, bcx)); + + // If we document a binary, we need the library available + if unit.target.is_bin() { + ret.extend(maybe_lib(unit, bcx, ProfileFor::Any)); + } + Ok(ret) +} + +fn maybe_lib<'a>( + unit: &Unit<'a>, + bcx: &BuildContext, + profile_for: ProfileFor, +) -> Option<(Unit<'a>, ProfileFor)> { + unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| { + let mode = check_or_build_mode(&unit.mode, t); + let unit = new_unit(bcx, unit.pkg, t, profile_for, unit.kind.for_target(t), mode); + (unit, profile_for) + }) +} + +/// If a build script is scheduled to be run for the package specified by +/// `unit`, this function will return the unit to run that build script. +/// +/// Overriding a build script simply means that the running of the build +/// script itself doesn't have any dependencies, so even in that case a unit +/// of work is still returned. `None` is only returned if the package has no +/// build script. +fn dep_build_script<'a>(unit: &Unit<'a>, bcx: &BuildContext) -> Option<(Unit<'a>, ProfileFor)> { + unit.pkg + .targets() + .iter() + .find(|t| t.is_custom_build()) + .map(|t| { + // The profile stored in the Unit is the profile for the thing + // the custom build script is running for. + ( + Unit { + pkg: unit.pkg, + target: t, + profile: bcx.profiles.get_profile_run_custom_build(&unit.profile), + kind: unit.kind, + mode: CompileMode::RunCustomBuild, + }, + ProfileFor::CustomBuild, + ) + }) +} + +/// Choose the correct mode for dependencies. +fn check_or_build_mode(mode: &CompileMode, target: &Target) -> CompileMode { + match *mode { + CompileMode::Check { .. } | CompileMode::Doc { .. } => { + if target.for_host() { + // Plugin and proc-macro targets should be compiled like + // normal. + CompileMode::Build + } else { + // Regular dependencies should not be checked with --test. + // Regular dependencies of doc targets should emit rmeta only. + CompileMode::Check { test: false } + } + } + _ => CompileMode::Build, + } +} + +fn new_unit<'a>( + bcx: &BuildContext, + pkg: &'a Package, + target: &'a Target, + profile_for: ProfileFor, + kind: Kind, + mode: CompileMode, +) -> Unit<'a> { + let profile = bcx.profiles.get_profile( + &pkg.package_id(), + bcx.ws.is_member(pkg), + profile_for, + mode, + bcx.build_config.release, + ); + Unit { + pkg, + target, + profile, + kind, + mode, + } +} diff --git a/src/cargo/core/compiler/custom_build.rs b/src/cargo/core/compiler/custom_build.rs new file mode 100644 index 000000000..f358a5eaa --- /dev/null +++ b/src/cargo/core/compiler/custom_build.rs @@ -0,0 +1,623 @@ +use std::collections::hash_map::{Entry, HashMap}; +use std::collections::{BTreeSet, HashSet}; +use std::fs; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::{Arc, Mutex}; + +use core::PackageId; +use util::errors::{CargoResult, CargoResultExt}; +use util::machine_message; +use util::{self, internal, paths, profile}; +use util::{Cfg, Freshness}; + +use super::job::Work; +use super::{fingerprint, Context, Kind, TargetConfig, Unit}; + +/// Contains the parsed output of a custom build script. 
+#[derive(Clone, Debug, Hash)]
+pub struct BuildOutput {
+    /// Paths to pass to rustc with the `-L` flag
+    pub library_paths: Vec<PathBuf>,
+    /// Names and link kinds of libraries, suitable for the `-l` flag
+    pub library_links: Vec<String>,
+    /// Various `--cfg` flags to pass to the compiler
+    pub cfgs: Vec<String>,
+    /// Additional environment variables to run the compiler with.
+    pub env: Vec<(String, String)>,
+    /// Metadata to pass to the immediate dependencies
+    pub metadata: Vec<(String, String)>,
+    /// Paths to trigger a rerun of this build script.
+    pub rerun_if_changed: Vec<PathBuf>,
+    /// Environment variables which, when changed, will cause a rebuild.
+    pub rerun_if_env_changed: Vec<String>,
+    /// Warnings generated by this build.
+    pub warnings: Vec<String>,
+}
+
+/// Map of packages to build info
+pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
+
+/// Build info and overrides
+pub struct BuildState {
+    pub outputs: Mutex<BuildMap>,
+    overrides: HashMap<(String, Kind), BuildOutput>,
+}
+
+#[derive(Default)]
+pub struct BuildScripts {
+    // Cargo will use this `to_link` vector to add -L flags to compiles as we
+    // propagate them upwards towards the final build. Note, however, that we
+    // need to preserve the ordering of `to_link` to be topologically sorted.
+    // This will ensure that build scripts which print their paths properly will
+    // correctly pick up the files they generated (if there are duplicates
+    // elsewhere).
+    //
+    // To preserve this ordering, the (id, kind) is stored in two places, once
+    // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
+    // this as we're building interactively below to ensure that the memory
+    // usage here doesn't blow up too much.
+    //
+    // For more information, see #2354.
+    pub to_link: Vec<(PackageId, Kind)>,
+    seen_to_link: HashSet<(PackageId, Kind)>,
+    pub plugins: BTreeSet<PackageId>,
+}
+
+pub struct BuildDeps {
+    pub build_script_output: PathBuf,
+    pub rerun_if_changed: Vec<PathBuf>,
+    pub rerun_if_env_changed: Vec<String>,
+}
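For orientation, the producer side of `BuildOutput`: a typical `build.rs` emits `cargo:` directives on stdout, one per line, which the parser later in this file sorts into the fields above (an illustrative script; the paths and names are made up):

    // build.rs (illustrative)
    fn main() {
        println!("cargo:rustc-link-search=/opt/mylib/lib"); // -> library_paths
        println!("cargo:rustc-link-lib=static=mylib"); // -> library_links
        println!("cargo:rustc-cfg=has_mylib"); // -> cfgs
        println!("cargo:rustc-env=MYLIB_VERSION=1.2.0"); // -> env
        println!("cargo:rerun-if-changed=wrapper.h"); // -> rerun_if_changed
        println!("cargo:rerun-if-env-changed=MYLIB_DIR"); // -> rerun_if_env_changed
        println!("cargo:warning=using the bundled copy of mylib"); // -> warnings
        // Any other key lands in `metadata`, surfaced to the build scripts of
        // immediate dependents as DEP_<links>_<KEY>:
        println!("cargo:include=/opt/mylib/include");
    }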
+
+/// Prepares a `Work` that executes the target as a custom build script.
+///
+/// The `req` given is the requirement which this run of the build script will
+/// prepare work for. If the requirement is specified as both the target and the
+/// host platforms it is assumed that the two are equal and the build script is
+/// only run once (not twice).
+pub fn prepare<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<(Work, Work, Freshness)> {
+    let _p = profile::start(format!(
+        "build script prepare: {}/{}",
+        unit.pkg,
+        unit.target.name()
+    ));
+
+    let key = (unit.pkg.package_id().clone(), unit.kind);
+    let overridden = cx.build_script_overridden.contains(&key);
+    let (work_dirty, work_fresh) = if overridden {
+        (Work::noop(), Work::noop())
+    } else {
+        build_work(cx, unit)?
+    };
+
+    // Now that we've prep'd our work, build the work needed to manage the
+    // fingerprint and then start returning that upwards.
+    let (freshness, dirty, fresh) = fingerprint::prepare_build_cmd(cx, unit)?;
+
+    Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
+}
+
+fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> {
+    assert!(unit.mode.is_run_custom_build());
+    let bcx = &cx.bcx;
+    let dependencies = cx.dep_targets(unit);
+    let build_script_unit = dependencies
+        .iter()
+        .find(|d| !d.mode.is_run_custom_build() && d.target.is_custom_build())
+        .expect("running a script not depending on an actual script");
+    let script_output = cx.files().build_script_dir(build_script_unit);
+    let build_output = cx.files().build_script_out_dir(unit);
+
+    // Building the command to execute
+    let to_exec = script_output.join(unit.target.name());
+
+    // Start preparing the process to execute, starting out with some
+    // environment variables. Note that the profile-related environment
+    // variables are not set with the build script's profile but rather the
+    // package's library profile.
+    // NOTE: If you add any profile flags, be sure to update
+    // `Profiles::get_profile_run_custom_build` so that those flags get
+    // carried over.
+    let to_exec = to_exec.into_os_string();
+    let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?;
+    let debug = unit.profile.debuginfo.unwrap_or(0) != 0;
+    cmd.env("OUT_DIR", &build_output)
+        .env("CARGO_MANIFEST_DIR", unit.pkg.root())
+        .env("NUM_JOBS", &bcx.jobs().to_string())
+        .env(
+            "TARGET",
+            &match unit.kind {
+                Kind::Host => &bcx.host_triple(),
+                Kind::Target => bcx.target_triple(),
+            },
+        )
+        .env("DEBUG", debug.to_string())
+        .env("OPT_LEVEL", &unit.profile.opt_level.to_string())
+        .env(
+            "PROFILE",
+            if bcx.build_config.release {
+                "release"
+            } else {
+                "debug"
+            },
+        )
+        .env("HOST", &bcx.host_triple())
+        .env("RUSTC", &bcx.rustc.path)
+        .env("RUSTDOC", &*bcx.config.rustdoc()?)
+        .inherit_jobserver(&cx.jobserver);
+
+    if let Some(ref linker) = bcx.target_config.linker {
+        cmd.env("RUSTC_LINKER", linker);
+    }
+
+    if let Some(links) = unit.pkg.manifest().links() {
+        cmd.env("CARGO_MANIFEST_LINKS", links);
+    }
+
+    // Be sure to pass along all enabled features for this package, this is the
+    // last piece of statically known information that we have.
+    for feat in bcx.resolve.features(unit.pkg.package_id()).iter() {
+        cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
+    }
+
+    let mut cfg_map = HashMap::new();
+    for cfg in bcx.cfg(unit.kind) {
+        match *cfg {
+            Cfg::Name(ref n) => {
+                cfg_map.insert(n.clone(), None);
+            }
+            Cfg::KeyPair(ref k, ref v) => {
+                if let Some(ref mut values) =
+                    *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new()))
+                {
+                    values.push(v.clone())
+                }
+            }
+        }
+    }
+    for (k, v) in cfg_map {
+        let k = format!("CARGO_CFG_{}", super::envify(&k));
+        match v {
+            Some(list) => {
+                cmd.env(&k, list.join(","));
+            }
+            None => {
+                cmd.env(&k, "");
+            }
+        }
+    }
+
+    // Gather the set of native dependencies that this package has along with
+    // some other variables to close over.
+    //
+    // This information will be used at build-time later on to figure out which
+    // sorts of variables need to be discovered at that time.
+ let lib_deps = { + dependencies + .iter() + .filter_map(|unit| { + if unit.mode.is_run_custom_build() { + Some(( + unit.pkg.manifest().links().unwrap().to_string(), + unit.pkg.package_id().clone(), + )) + } else { + None + } + }) + .collect::>() + }; + let pkg_name = unit.pkg.to_string(); + let build_state = Arc::clone(&cx.build_state); + let id = unit.pkg.package_id().clone(); + let (output_file, err_file, root_output_file) = { + let build_output_parent = build_output.parent().unwrap(); + let output_file = build_output_parent.join("output"); + let err_file = build_output_parent.join("stderr"); + let root_output_file = build_output_parent.join("root-output"); + (output_file, err_file, root_output_file) + }; + let root_output = cx.files().target_root().to_path_buf(); + let all = ( + id.clone(), + pkg_name.clone(), + Arc::clone(&build_state), + output_file.clone(), + root_output.clone(), + ); + let build_scripts = super::load_build_deps(cx, unit); + let kind = unit.kind; + let json_messages = bcx.build_config.json_messages(); + + // Check to see if the build script has already run, and if it has keep + // track of whether it has told us about some explicit dependencies + let prev_root_output = paths::read_bytes(&root_output_file) + .and_then(|bytes| util::bytes2path(&bytes)) + .unwrap_or_else(|_| cmd.get_cwd().unwrap().to_path_buf()); + let prev_output = + BuildOutput::parse_file(&output_file, &pkg_name, &prev_root_output, &root_output).ok(); + let deps = BuildDeps::new(&output_file, prev_output.as_ref()); + cx.build_explicit_deps.insert(*unit, deps); + + fs::create_dir_all(&script_output)?; + fs::create_dir_all(&build_output)?; + + // Prepare the unit of "dirty work" which will actually run the custom build + // command. + // + // Note that this has to do some extra work just before running the command + // to determine extra environment variables and such. + let dirty = Work::new(move |state| { + // Make sure that OUT_DIR exists. + // + // If we have an old build directory, then just move it into place, + // otherwise create it! + if fs::metadata(&build_output).is_err() { + fs::create_dir(&build_output).chain_err(|| { + internal( + "failed to create script output directory for \ + build command", + ) + })?; + } + + // For all our native lib dependencies, pick up their metadata to pass + // along to this custom build command. We're also careful to augment our + // dynamic library search path in case the build script depended on any + // native dynamic libraries. + { + let build_state = build_state.outputs.lock().unwrap(); + for (name, id) in lib_deps { + let key = (id.clone(), kind); + let state = build_state.get(&key).ok_or_else(|| { + internal(format!( + "failed to locate build state for env \ + vars: {}/{:?}", + id, kind + )) + })?; + let data = &state.metadata; + for &(ref key, ref value) in data.iter() { + cmd.env( + &format!("DEP_{}_{}", super::envify(&name), super::envify(key)), + value, + ); + } + } + if let Some(build_scripts) = build_scripts { + super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &root_output)?; + } + } + + // And now finally, run the build command itself! 
+ state.running(&cmd); + let output = cmd.exec_with_streaming( + &mut |out_line| { + state.stdout(out_line); + Ok(()) + }, + &mut |err_line| { + state.stderr(err_line); + Ok(()) + }, + true, + ).map_err(|e| { + format_err!( + "failed to run custom build command for `{}`\n{}", + pkg_name, + e + ) + })?; + + // After the build command has finished running, we need to be sure to + // remember all of its output so we can later discover precisely what it + // was, even if we don't run the build command again (due to freshness). + // + // This is also the location where we provide feedback into the build + // state informing what variables were discovered via our script as + // well. + paths::write(&output_file, &output.stdout)?; + paths::write(&err_file, &output.stderr)?; + paths::write(&root_output_file, util::path2bytes(&root_output)?)?; + let parsed_output = + BuildOutput::parse(&output.stdout, &pkg_name, &root_output, &root_output)?; + + if json_messages { + let library_paths = parsed_output + .library_paths + .iter() + .map(|l| l.display().to_string()) + .collect::>(); + machine_message::emit(&machine_message::BuildScript { + package_id: &id, + linked_libs: &parsed_output.library_links, + linked_paths: &library_paths, + cfgs: &parsed_output.cfgs, + env: &parsed_output.env, + }); + } + + build_state.insert(id, kind, parsed_output); + Ok(()) + }); + + // Now that we've prepared our work-to-do, we need to prepare the fresh work + // itself to run when we actually end up just discarding what we calculated + // above. + let fresh = Work::new(move |_tx| { + let (id, pkg_name, build_state, output_file, root_output) = all; + let output = match prev_output { + Some(output) => output, + None => { + BuildOutput::parse_file(&output_file, &pkg_name, &prev_root_output, &root_output)? + } + }; + build_state.insert(id, kind, output); + Ok(()) + }); + + Ok((dirty, fresh)) +} + +impl BuildState { + pub fn new(host_config: &TargetConfig, target_config: &TargetConfig) -> BuildState { + let mut overrides = HashMap::new(); + let i1 = host_config.overrides.iter().map(|p| (p, Kind::Host)); + let i2 = target_config.overrides.iter().map(|p| (p, Kind::Target)); + for ((name, output), kind) in i1.chain(i2) { + overrides.insert((name.clone(), kind), output.clone()); + } + BuildState { + outputs: Mutex::new(HashMap::new()), + overrides, + } + } + + fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) { + self.outputs.lock().unwrap().insert((id, kind), output); + } +} + +impl BuildOutput { + pub fn parse_file( + path: &Path, + pkg_name: &str, + root_output_when_generated: &Path, + root_output: &Path, + ) -> CargoResult { + let contents = paths::read_bytes(path)?; + BuildOutput::parse(&contents, pkg_name, root_output_when_generated, root_output) + } + + // Parses the output of a script. + // The `pkg_name` is used for error messages. + pub fn parse( + input: &[u8], + pkg_name: &str, + root_output_when_generated: &Path, + root_output: &Path, + ) -> CargoResult { + let mut library_paths = Vec::new(); + let mut library_links = Vec::new(); + let mut cfgs = Vec::new(); + let mut env = Vec::new(); + let mut metadata = Vec::new(); + let mut rerun_if_changed = Vec::new(); + let mut rerun_if_env_changed = Vec::new(); + let mut warnings = Vec::new(); + let whence = format!("build script of `{}`", pkg_name); + + for line in input.split(|b| *b == b'\n') { + let line = match str::from_utf8(line) { + Ok(line) => line.trim(), + Err(..) 
=> continue, + }; + let mut iter = line.splitn(2, ':'); + if iter.next() != Some("cargo") { + // skip this line since it doesn't start with "cargo:" + continue; + } + let data = match iter.next() { + Some(val) => val, + None => continue, + }; + + // getting the `key=value` part of the line + let mut iter = data.splitn(2, '='); + let key = iter.next(); + let value = iter.next(); + let (key, value) = match (key, value) { + (Some(a), Some(b)) => (a, b.trim_right()), + // line started with `cargo:` but didn't match `key=value` + _ => bail!("Wrong output in {}: `{}`", whence, line), + }; + + let path = |val: &str| match Path::new(val).strip_prefix(root_output_when_generated) { + Ok(path) => root_output.join(path), + Err(_) => PathBuf::from(val), + }; + + match key { + "rustc-flags" => { + let (paths, links) = BuildOutput::parse_rustc_flags(value, &whence)?; + library_links.extend(links.into_iter()); + library_paths.extend(paths.into_iter()); + } + "rustc-link-lib" => library_links.push(value.to_string()), + "rustc-link-search" => library_paths.push(path(value)), + "rustc-cfg" => cfgs.push(value.to_string()), + "rustc-env" => env.push(BuildOutput::parse_rustc_env(value, &whence)?), + "warning" => warnings.push(value.to_string()), + "rerun-if-changed" => rerun_if_changed.push(path(value)), + "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()), + _ => metadata.push((key.to_string(), value.to_string())), + } + } + + Ok(BuildOutput { + library_paths, + library_links, + cfgs, + env, + metadata, + rerun_if_changed, + rerun_if_env_changed, + warnings, + }) + } + + pub fn parse_rustc_flags( + value: &str, + whence: &str, + ) -> CargoResult<(Vec, Vec)> { + let value = value.trim(); + let mut flags_iter = value + .split(|c: char| c.is_whitespace()) + .filter(|w| w.chars().any(|c| !c.is_whitespace())); + let (mut library_paths, mut library_links) = (Vec::new(), Vec::new()); + while let Some(flag) = flags_iter.next() { + if flag != "-l" && flag != "-L" { + bail!( + "Only `-l` and `-L` flags are allowed in {}: `{}`", + whence, + value + ) + } + let value = match flags_iter.next() { + Some(v) => v, + None => bail!( + "Flag in rustc-flags has no value in {}: `{}`", + whence, + value + ), + }; + match flag { + "-l" => library_links.push(value.to_string()), + "-L" => library_paths.push(PathBuf::from(value)), + + // was already checked above + _ => bail!("only -l and -L flags are allowed"), + }; + } + Ok((library_paths, library_links)) + } + + pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> { + let mut iter = value.splitn(2, '='); + let name = iter.next(); + let val = iter.next(); + match (name, val) { + (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())), + _ => bail!("Variable rustc-env has no value in {}: {}", whence, value), + } + } +} + +impl BuildDeps { + pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps { + BuildDeps { + build_script_output: output_file.to_path_buf(), + rerun_if_changed: output + .map(|p| &p.rerun_if_changed) + .cloned() + .unwrap_or_default(), + rerun_if_env_changed: output + .map(|p| &p.rerun_if_env_changed) + .cloned() + .unwrap_or_default(), + } + } +} + +/// Compute the `build_scripts` map in the `Context` which tracks what build +/// scripts each package depends on. +/// +/// The global `build_scripts` map lists for all (package, kind) tuples what set +/// of packages' build script outputs must be considered. 
For example this lists +/// all dependencies' `-L` flags which need to be propagated transitively. +/// +/// The given set of targets to this function is the initial set of +/// targets/profiles which are being built. +pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> { + let mut ret = HashMap::new(); + for unit in units { + build(&mut ret, cx, unit)?; + } + cx.build_scripts + .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v)))); + return Ok(()); + + // Recursive function to build up the map we're constructing. This function + // memoizes all of its return values as it goes along. + fn build<'a, 'b, 'cfg>( + out: &'a mut HashMap, BuildScripts>, + cx: &mut Context<'b, 'cfg>, + unit: &Unit<'b>, + ) -> CargoResult<&'a BuildScripts> { + // Do a quick pre-flight check to see if we've already calculated the + // set of dependencies. + if out.contains_key(unit) { + return Ok(&out[unit]); + } + + { + let key = unit.pkg + .manifest() + .links() + .map(|l| (l.to_string(), unit.kind)); + let build_state = &cx.build_state; + if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) { + let key = (unit.pkg.package_id().clone(), unit.kind); + cx.build_script_overridden.insert(key.clone()); + build_state + .outputs + .lock() + .unwrap() + .insert(key, output.clone()); + } + } + + let mut ret = BuildScripts::default(); + + if !unit.target.is_custom_build() && unit.pkg.has_custom_build() { + add_to_link(&mut ret, unit.pkg.package_id(), unit.kind); + } + + // We want to invoke the compiler deterministically to be cache-friendly + // to rustc invocation caching schemes, so be sure to generate the same + // set of build script dependency orderings via sorting the targets that + // come out of the `Context`. + let mut targets = cx.dep_targets(unit); + targets.sort_by_key(|u| u.pkg.package_id()); + + for unit in targets.iter() { + let dep_scripts = build(out, cx, unit)?; + + if unit.target.for_host() { + ret.plugins + .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned()); + } else if unit.target.linkable() { + for &(ref pkg, kind) in dep_scripts.to_link.iter() { + add_to_link(&mut ret, pkg, kind); + } + } + } + + match out.entry(*unit) { + Entry::Vacant(entry) => Ok(entry.insert(ret)), + Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"), + } + } + + // When adding an entry to 'to_link' we only actually push it on if the + // script hasn't seen it yet (e.g. we don't push on duplicates). + fn add_to_link(scripts: &mut BuildScripts, pkg: &PackageId, kind: Kind) { + if scripts.seen_to_link.insert((pkg.clone(), kind)) { + scripts.to_link.push((pkg.clone(), kind)); + } + } +} diff --git a/src/cargo/core/compiler/fingerprint.rs b/src/cargo/core/compiler/fingerprint.rs new file mode 100644 index 000000000..284bc5993 --- /dev/null +++ b/src/cargo/core/compiler/fingerprint.rs @@ -0,0 +1,841 @@ +use std::env; +use std::fs; +use std::hash::{self, Hasher}; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, Mutex}; + +use filetime::FileTime; +use serde::de::{self, Deserialize}; +use serde::ser::{self, Serialize}; +use serde_json; + +use core::{Edition, Package, TargetKind}; +use util; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; +use util::{internal, profile, Dirty, Fresh, Freshness}; + +use super::{Context, BuildContext, FileFlavor, Unit}; +use super::custom_build::BuildDeps; +use super::job::Work; + +/// A tuple result of the `prepare_foo` functions in this module. 
+/// +/// The first element of the triple is whether the target in question is +/// currently fresh or not, and the second two elements are work to perform when +/// the target is dirty or fresh, respectively. +/// +/// Both units of work are always generated because a fresh package may still be +/// rebuilt if some upstream dependency changes. +pub type Preparation = (Freshness, Work, Work); + +/// Prepare the necessary work for the fingerprint for a specific target. +/// +/// When dealing with fingerprints, cargo gets to choose what granularity +/// "freshness" is considered at. One option is considering freshness at the +/// package level. This means that if anything in a package changes, the entire +/// package is rebuilt, unconditionally. This simplicity comes at a cost, +/// however, in that test-only changes will cause libraries to be rebuilt, which +/// is quite unfortunate! +/// +/// The cost was deemed high enough that fingerprints are now calculated at the +/// layer of a target rather than a package. Each target can then be kept track +/// of separately and only rebuilt as necessary. This requires cargo to +/// understand what the inputs are to a target, so we drive rustc with the +/// --dep-info flag to learn about all input files to a unit of compilation. +/// +/// This function will calculate the fingerprint for a target and prepare the +/// work necessary to either write the fingerprint or copy over all fresh files +/// from the old directories to their new locations. +pub fn prepare_target<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> CargoResult { + let _p = profile::start(format!( + "fingerprint: {} / {}", + unit.pkg.package_id(), + unit.target.name() + )); + let bcx = cx.bcx; + let new = cx.files().fingerprint_dir(unit); + let loc = new.join(&filename(cx, unit)); + + debug!("fingerprint at: {}", loc.display()); + + let fingerprint = calculate(cx, unit)?; + let compare = compare_old_fingerprint(&loc, &*fingerprint); + log_compare(unit, &compare); + + // If our comparison failed (e.g. we're going to trigger a rebuild of this + // crate), then we also ensure the source of the crate passes all + // verification checks before we build it. + // + // The `Source::verify` method is intended to allow sources to execute + // pre-build checks to ensure that the relevant source code is all + // up-to-date and as expected. This is currently used primarily for + // directory sources which will use this hook to perform an integrity check + // on all files in the source to ensure they haven't changed. If they have + // changed then an error is issued. 
+ if compare.is_err() { + let source_id = unit.pkg.package_id().source_id(); + let sources = bcx.packages.sources(); + let source = sources + .get(source_id) + .ok_or_else(|| internal("missing package source"))?; + source.verify(unit.pkg.package_id())?; + } + + let root = cx.files().out_dir(unit); + let mut missing_outputs = false; + if unit.mode.is_doc() { + missing_outputs = !root.join(unit.target.crate_name()) + .join("index.html") + .exists(); + } else { + for output in cx.outputs(unit)?.iter() { + if output.flavor == FileFlavor::DebugInfo { + continue; + } + missing_outputs |= !output.path.exists(); + if let Some(ref link_dst) = output.hardlink { + missing_outputs |= !link_dst.exists(); + } + } + } + + let allow_failure = bcx.extra_args_for(unit).is_some(); + let target_root = cx.files().target_root().to_path_buf(); + let write_fingerprint = Work::new(move |_| { + match fingerprint.update_local(&target_root) { + Ok(()) => {} + Err(..) if allow_failure => return Ok(()), + Err(e) => return Err(e), + } + write_fingerprint(&loc, &*fingerprint) + }); + + let fresh = compare.is_ok() && !missing_outputs; + Ok(( + if fresh { Fresh } else { Dirty }, + write_fingerprint, + Work::noop(), + )) +} + +/// A fingerprint can be considered to be a "short string" representing the +/// state of a world for a package. +/// +/// If a fingerprint ever changes, then the package itself needs to be +/// recompiled. Inputs to the fingerprint include source code modifications, +/// compiler flags, compiler version, etc. This structure is not simply a +/// `String` due to the fact that some fingerprints cannot be calculated lazily. +/// +/// Path sources, for example, use the mtime of the corresponding dep-info file +/// as a fingerprint (all source files must be modified *before* this mtime). +/// This dep-info file is not generated, however, until after the crate is +/// compiled. As a result, this structure can be thought of as a fingerprint +/// to-be. The actual value can be calculated via `hash()`, but the operation +/// may fail as some files may not have been generated. +/// +/// Note that dependencies are taken into account for fingerprints because rustc +/// requires that whenever an upstream crate is recompiled that all downstream +/// dependants are also recompiled. This is typically tracked through +/// `DependencyQueue`, but it also needs to be retained here because Cargo can +/// be interrupted while executing, losing the state of the `DependencyQueue` +/// graph. 
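The "fingerprint to-be" idea above reduces to a lazily computed, mutex-guarded cached hash, the same shape as the `memoized_hash` field in the struct that follows. A minimal sketch under that assumption (the names and the stand-in hash function are illustrative):

```rust
use std::sync::Mutex;

/// Compute the hash at most once and cache it; clearing the cache (as
/// `update_local` does in the real code) forces recomputation.
struct MemoizedHash {
    input: String,
    cached: Mutex<Option<u64>>,
}

impl MemoizedHash {
    fn hash(&self) -> u64 {
        if let Some(h) = *self.cached.lock().unwrap() {
            return h;
        }
        // Stand-in for `util::hash_u64`.
        let h = self.input.bytes().fold(0u64, |acc, b| {
            acc.wrapping_mul(31).wrapping_add(u64::from(b))
        });
        *self.cached.lock().unwrap() = Some(h);
        h
    }

    fn bust(&self) {
        *self.cached.lock().unwrap() = None;
    }
}

fn main() {
    let m = MemoizedHash { input: "src/lib.rs".into(), cached: Mutex::new(None) };
    let first = m.hash();
    assert_eq!(first, m.hash()); // served from the cache
    m.bust();
    assert_eq!(first, m.hash()); // recomputed to the same value
}
```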
+#[derive(Serialize, Deserialize)] +pub struct Fingerprint { + rustc: u64, + features: String, + target: u64, + profile: u64, + path: u64, + #[serde(serialize_with = "serialize_deps", deserialize_with = "deserialize_deps")] + deps: Vec<(String, String, Arc)>, + local: Vec, + #[serde(skip_serializing, skip_deserializing)] + memoized_hash: Mutex>, + rustflags: Vec, + edition: Edition, +} + +fn serialize_deps(deps: &[(String, String, Arc)], ser: S) -> Result +where + S: ser::Serializer, +{ + deps.iter() + .map(|&(ref a, ref b, ref c)| (a, b, c.hash())) + .collect::>() + .serialize(ser) +} + +fn deserialize_deps<'de, D>(d: D) -> Result)>, D::Error> +where + D: de::Deserializer<'de>, +{ + let decoded = >::deserialize(d)?; + Ok(decoded + .into_iter() + .map(|(pkg_id, name, hash)| { + ( + pkg_id, + name, + Arc::new(Fingerprint { + rustc: 0, + target: 0, + profile: 0, + path: 0, + local: vec![LocalFingerprint::Precalculated(String::new())], + features: String::new(), + deps: Vec::new(), + memoized_hash: Mutex::new(Some(hash)), + edition: Edition::Edition2015, + rustflags: Vec::new(), + }), + ) + }) + .collect()) +} + +#[derive(Serialize, Deserialize, Hash)] +enum LocalFingerprint { + Precalculated(String), + MtimeBased(MtimeSlot, PathBuf), + EnvBased(String, Option), +} + +impl LocalFingerprint { + fn mtime(root: &Path, mtime: Option, path: &Path) -> LocalFingerprint { + let mtime = MtimeSlot(Mutex::new(mtime)); + assert!(path.is_absolute()); + let path = path.strip_prefix(root).unwrap_or(path); + LocalFingerprint::MtimeBased(mtime, path.to_path_buf()) + } +} + +struct MtimeSlot(Mutex>); + +impl Fingerprint { + fn update_local(&self, root: &Path) -> CargoResult<()> { + let mut hash_busted = false; + for local in self.local.iter() { + match *local { + LocalFingerprint::MtimeBased(ref slot, ref path) => { + let path = root.join(path); + let mtime = paths::mtime(&path)?; + *slot.0.lock().unwrap() = Some(mtime); + } + LocalFingerprint::EnvBased(..) | LocalFingerprint::Precalculated(..) 
=> continue, + } + hash_busted = true; + } + + if hash_busted { + *self.memoized_hash.lock().unwrap() = None; + } + Ok(()) + } + + fn hash(&self) -> u64 { + if let Some(s) = *self.memoized_hash.lock().unwrap() { + return s; + } + let ret = util::hash_u64(self); + *self.memoized_hash.lock().unwrap() = Some(ret); + ret + } + + fn compare(&self, old: &Fingerprint) -> CargoResult<()> { + if self.rustc != old.rustc { + bail!("rust compiler has changed") + } + if self.features != old.features { + bail!( + "features have changed: {} != {}", + self.features, + old.features + ) + } + if self.target != old.target { + bail!("target configuration has changed") + } + if self.path != old.path { + bail!("path to the compiler has changed") + } + if self.profile != old.profile { + bail!("profile configuration has changed") + } + if self.rustflags != old.rustflags { + bail!("RUSTFLAGS has changed") + } + if self.local.len() != old.local.len() { + bail!("local lens changed"); + } + if self.edition != old.edition { + bail!("edition changed") + } + for (new, old) in self.local.iter().zip(&old.local) { + match (new, old) { + ( + &LocalFingerprint::Precalculated(ref a), + &LocalFingerprint::Precalculated(ref b), + ) => { + if a != b { + bail!("precalculated components have changed: {} != {}", a, b) + } + } + ( + &LocalFingerprint::MtimeBased(ref on_disk_mtime, ref ap), + &LocalFingerprint::MtimeBased(ref previously_built_mtime, ref bp), + ) => { + let on_disk_mtime = on_disk_mtime.0.lock().unwrap(); + let previously_built_mtime = previously_built_mtime.0.lock().unwrap(); + + let should_rebuild = match (*on_disk_mtime, *previously_built_mtime) { + (None, None) => false, + (Some(_), None) | (None, Some(_)) => true, + (Some(on_disk), Some(previously_built)) => on_disk > previously_built, + }; + + if should_rebuild { + bail!( + "mtime based components have changed: previously {:?} now {:?}, \ + paths are {:?} and {:?}", + *previously_built_mtime, + *on_disk_mtime, + ap, + bp + ) + } + } + ( + &LocalFingerprint::EnvBased(ref akey, ref avalue), + &LocalFingerprint::EnvBased(ref bkey, ref bvalue), + ) => { + if *akey != *bkey { + bail!("env vars changed: {} != {}", akey, bkey); + } + if *avalue != *bvalue { + bail!( + "env var `{}` changed: previously {:?} now {:?}", + akey, + bvalue, + avalue + ) + } + } + _ => bail!("local fingerprint type has changed"), + } + } + + if self.deps.len() != old.deps.len() { + bail!("number of dependencies has changed") + } + for (a, b) in self.deps.iter().zip(old.deps.iter()) { + if a.1 != b.1 || a.2.hash() != b.2.hash() { + bail!("new ({}) != old ({})", a.0, b.0) + } + } + Ok(()) + } +} + +impl hash::Hash for Fingerprint { + fn hash(&self, h: &mut H) { + let Fingerprint { + rustc, + ref features, + target, + path, + profile, + ref deps, + ref local, + edition, + ref rustflags, + .. 
+ } = *self; + ( + rustc, features, target, path, profile, local, edition, rustflags, + ).hash(h); + + h.write_usize(deps.len()); + for &(ref pkg_id, ref name, ref fingerprint) in deps { + pkg_id.hash(h); + name.hash(h); + // use memoized dep hashes to avoid exponential blowup + h.write_u64(Fingerprint::hash(fingerprint)); + } + } +} + +impl hash::Hash for MtimeSlot { + fn hash(&self, h: &mut H) { + self.0.lock().unwrap().hash(h) + } +} + +impl ser::Serialize for MtimeSlot { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + self.0 + .lock() + .unwrap() + .map(|ft| (ft.unix_seconds(), ft.nanoseconds())) + .serialize(s) + } +} + +impl<'de> de::Deserialize<'de> for MtimeSlot { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let kind: Option<(i64, u32)> = de::Deserialize::deserialize(d)?; + Ok(MtimeSlot(Mutex::new(kind.map(|(s, n)| { + FileTime::from_unix_time(s, n) + })))) + } +} + +/// Calculates the fingerprint for a package/target pair. +/// +/// This fingerprint is used by Cargo to learn about when information such as: +/// +/// * A non-path package changes (changes version, changes revision, etc). +/// * Any dependency changes +/// * The compiler changes +/// * The set of features a package is built with changes +/// * The profile a target is compiled with changes (e.g. opt-level changes) +/// +/// Information like file modification time is only calculated for path +/// dependencies and is calculated in `calculate_target_fresh`. +fn calculate<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> CargoResult> { + let bcx = cx.bcx; + if let Some(s) = cx.fingerprints.get(unit) { + return Ok(Arc::clone(s)); + } + + // Next, recursively calculate the fingerprint for all of our dependencies. + // + // Skip the fingerprints of build scripts as they may not always be + // available and the dirtiness propagation for modification is tracked + // elsewhere. Also skip fingerprints of binaries because they don't actually + // induce a recompile, they're just dependencies in the sense that they need + // to be built. + let deps = cx.dep_targets(unit); + let deps = deps.iter() + .filter(|u| !u.target.is_custom_build() && !u.target.is_bin()) + .map(|dep| { + calculate(cx, dep).and_then(|fingerprint| { + let name = cx.bcx.extern_crate_name(unit, dep)?; + Ok((dep.pkg.package_id().to_string(), name, fingerprint)) + }) + }) + .collect::>>()?; + + // And finally, calculate what our own local fingerprint is + let local = if use_dep_info(unit) { + let dep_info = dep_info_loc(cx, unit); + let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?; + LocalFingerprint::mtime(cx.files().target_root(), mtime, &dep_info) + } else { + let fingerprint = pkg_fingerprint(&cx.bcx, unit.pkg)?; + LocalFingerprint::Precalculated(fingerprint) + }; + let mut deps = deps; + deps.sort_by(|&(ref a, _, _), &(ref b, _, _)| a.cmp(b)); + let extra_flags = if unit.mode.is_doc() { + bcx.rustdocflags_args(unit)? + } else { + bcx.rustflags_args(unit)? + }; + let profile_hash = util::hash_u64(&( + &unit.profile, + unit.mode, + bcx.extra_args_for(unit), + cx.incremental_args(unit)?, + cx.used_in_plugin.contains(unit), // used when passing panic=abort + )); + let fingerprint = Arc::new(Fingerprint { + rustc: util::hash_u64(&bcx.rustc.verbose_version), + target: util::hash_u64(&unit.target), + profile: profile_hash, + // Note that .0 is hashed here, not .1 which is the cwd. That doesn't + // actually affect the output artifact so there's no need to hash it. 
+ path: util::hash_u64(&super::path_args(&cx.bcx, unit).0), + features: format!("{:?}", bcx.resolve.features_sorted(unit.pkg.package_id())), + deps, + local: vec![local], + memoized_hash: Mutex::new(None), + edition: unit.pkg.manifest().edition(), + rustflags: extra_flags, + }); + cx.fingerprints.insert(*unit, Arc::clone(&fingerprint)); + Ok(fingerprint) +} + +// We want to use the mtime for files if we're a path source, but if we're a +// git/registry source, then the mtime of files may fluctuate, but they won't +// change so long as the source itself remains constant (which is the +// responsibility of the source) +fn use_dep_info(unit: &Unit) -> bool { + let path = unit.pkg.summary().source_id().is_path(); + !unit.mode.is_doc() && path +} + +/// Prepare the necessary work for the fingerprint of a build command. +/// +/// Build commands are located on packages, not on targets. Additionally, we +/// don't have --dep-info to drive calculation of the fingerprint of a build +/// command. This brings up an interesting predicament which gives us a few +/// options to figure out whether a build command is dirty or not: +/// +/// 1. A build command is dirty if *any* file in a package changes. In theory +/// all files are candidate for being used by the build command. +/// 2. A build command is dirty if any file in a *specific directory* changes. +/// This may lose information as it may require files outside of the specific +/// directory. +/// 3. A build command must itself provide a dep-info-like file stating how it +/// should be considered dirty or not. +/// +/// The currently implemented solution is option (1), although it is planned to +/// migrate to option (2) in the near future. +pub fn prepare_build_cmd<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> CargoResult { + let _p = profile::start(format!("fingerprint build cmd: {}", unit.pkg.package_id())); + let new = cx.files().fingerprint_dir(unit); + let loc = new.join("build"); + + debug!("fingerprint at: {}", loc.display()); + + let (local, output_path) = build_script_local_fingerprints(cx, unit)?; + let mut fingerprint = Fingerprint { + rustc: 0, + target: 0, + profile: 0, + path: 0, + features: String::new(), + deps: Vec::new(), + local, + memoized_hash: Mutex::new(None), + edition: Edition::Edition2015, + rustflags: Vec::new(), + }; + let compare = compare_old_fingerprint(&loc, &fingerprint); + log_compare(unit, &compare); + + // When we write out the fingerprint, we may want to actually change the + // kind of fingerprint being recorded. If we started out, then the previous + // run of the build script (or if it had never run before) may indicate to + // use the `Precalculated` variant with the `pkg_fingerprint`. If the build + // script then prints `rerun-if-changed`, however, we need to record what's + // necessary for that fingerprint. + // + // Hence, if there were some `rerun-if-changed` directives forcibly change + // the kind of fingerprint by reinterpreting the dependencies output by the + // build script. 
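For context, the `rerun-if-*` directives discussed above are what a build script prints to opt into this rerun tracking. A hypothetical `build.rs` (the file and variable names are illustrative):

```rust
// build.rs
fn main() {
    // Rebuild only when this file changes, instead of on any package change.
    println!("cargo:rerun-if-changed=src/schema.capnp");
    // ...or when this environment variable changes or disappears.
    println!("cargo:rerun-if-env-changed=MY_CODEGEN_FLAGS");
}
```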
+ let state = Arc::clone(&cx.build_state); + let key = (unit.pkg.package_id().clone(), unit.kind); + let pkg_root = unit.pkg.root().to_path_buf(); + let target_root = cx.files().target_root().to_path_buf(); + let write_fingerprint = Work::new(move |_| { + if let Some(output_path) = output_path { + let outputs = state.outputs.lock().unwrap(); + let outputs = &outputs[&key]; + if !outputs.rerun_if_changed.is_empty() || !outputs.rerun_if_env_changed.is_empty() { + let deps = BuildDeps::new(&output_path, Some(outputs)); + fingerprint.local = local_fingerprints_deps(&deps, &target_root, &pkg_root); + fingerprint.update_local(&target_root)?; + } + } + write_fingerprint(&loc, &fingerprint) + }); + + Ok(( + if compare.is_ok() { Fresh } else { Dirty }, + write_fingerprint, + Work::noop(), + )) +} + +fn build_script_local_fingerprints<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> CargoResult<(Vec, Option)> { + let state = cx.build_state.outputs.lock().unwrap(); + // First up, if this build script is entirely overridden, then we just + // return the hash of what we overrode it with. + // + // Note that the `None` here means that we don't want to update the local + // fingerprint afterwards because this is all just overridden. + if let Some(output) = state.get(&(unit.pkg.package_id().clone(), unit.kind)) { + debug!("override local fingerprints deps"); + let s = format!( + "overridden build state with hash: {}", + util::hash_u64(output) + ); + return Ok((vec![LocalFingerprint::Precalculated(s)], None)); + } + + // Next up we look at the previously listed dependencies for the build + // script. If there are none then we're in the "old mode" where we just + // assume that we're changed if anything in the packaged changed. The + // `Some` here though means that we want to update our local fingerprints + // after we're done as running this build script may have created more + // dependencies. + let deps = &cx.build_explicit_deps[unit]; + let output = deps.build_script_output.clone(); + if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() { + debug!("old local fingerprints deps"); + let s = pkg_fingerprint(&cx.bcx, unit.pkg)?; + return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output))); + } + + // Ok so now we're in "new mode" where we can have files listed as + // dependencies as well as env vars listed as dependencies. Process them all + // here. 
+ Ok(( + local_fingerprints_deps(deps, cx.files().target_root(), unit.pkg.root()), + Some(output), + )) +} + +fn local_fingerprints_deps( + deps: &BuildDeps, + target_root: &Path, + pkg_root: &Path, +) -> Vec { + debug!("new local fingerprints deps"); + let mut local = Vec::new(); + if !deps.rerun_if_changed.is_empty() { + let output = &deps.build_script_output; + let deps = deps.rerun_if_changed.iter().map(|p| pkg_root.join(p)); + let mtime = mtime_if_fresh(output, deps); + local.push(LocalFingerprint::mtime(target_root, mtime, output)); + } + + for var in deps.rerun_if_env_changed.iter() { + let val = env::var(var).ok(); + local.push(LocalFingerprint::EnvBased(var.clone(), val)); + } + + local +} + +fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> { + let hash = fingerprint.hash(); + debug!("write fingerprint: {}", loc.display()); + paths::write(loc, util::to_hex(hash).as_bytes())?; + paths::write( + &loc.with_extension("json"), + &serde_json::to_vec(&fingerprint).unwrap(), + )?; + Ok(()) +} + +/// Prepare for work when a package starts to build +pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<()> { + let new1 = cx.files().fingerprint_dir(unit); + + if fs::metadata(&new1).is_err() { + fs::create_dir(&new1)?; + } + + Ok(()) +} + +pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf { + cx.files() + .fingerprint_dir(unit) + .join(&format!("dep-{}", filename(cx, unit))) +} + +fn compare_old_fingerprint(loc: &Path, new_fingerprint: &Fingerprint) -> CargoResult<()> { + let old_fingerprint_short = paths::read(loc)?; + let new_hash = new_fingerprint.hash(); + + if util::to_hex(new_hash) == old_fingerprint_short { + return Ok(()); + } + + let old_fingerprint_json = paths::read(&loc.with_extension("json"))?; + let old_fingerprint = serde_json::from_str(&old_fingerprint_json) + .chain_err(|| internal("failed to deserialize json"))?; + new_fingerprint.compare(&old_fingerprint) +} + +fn log_compare(unit: &Unit, compare: &CargoResult<()>) { + let ce = match *compare { + Ok(..) => return, + Err(ref e) => e, + }; + info!("fingerprint error for {}: {}", unit.pkg, ce); + + for cause in ce.causes().skip(1) { + info!(" cause: {}", cause); + } +} + +// Parse the dep-info into a list of paths +pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult>> { + let data = match paths::read_bytes(dep_info) { + Ok(data) => data, + Err(_) => return Ok(None), + }; + let paths = data.split(|&x| x == 0) + .filter(|x| !x.is_empty()) + .map(|p| util::bytes2path(p).map(|p| pkg.root().join(p))) + .collect::, _>>()?; + if paths.is_empty() { + Ok(None) + } else { + Ok(Some(paths)) + } +} + +fn dep_info_mtime_if_fresh(pkg: &Package, dep_info: &Path) -> CargoResult> { + if let Some(paths) = parse_dep_info(pkg, dep_info)? { + Ok(mtime_if_fresh(dep_info, paths.iter())) + } else { + Ok(None) + } +} + +fn pkg_fingerprint(bcx: &BuildContext, pkg: &Package) -> CargoResult { + let source_id = pkg.package_id().source_id(); + let sources = bcx.packages.sources(); + + let source = sources + .get(source_id) + .ok_or_else(|| internal("missing package source"))?; + source.fingerprint(pkg) +} + +fn mtime_if_fresh(output: &Path, paths: I) -> Option +where + I: IntoIterator, + I::Item: AsRef, +{ + let mtime = match paths::mtime(output) { + Ok(mtime) => mtime, + Err(..) 
=> return None,
+    };
+
+    let any_stale = paths.into_iter().any(|path| {
+        let path = path.as_ref();
+        let mtime2 = match paths::mtime(path) {
+            Ok(mtime) => mtime,
+            Err(..) => {
+                info!("stale: {} -- missing", path.display());
+                return true;
+            }
+        };
+        if mtime2 > mtime {
+            info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime);
+            true
+        } else {
+            false
+        }
+    });
+
+    if any_stale {
+        None
+    } else {
+        Some(mtime)
+    }
+}
+
+fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String {
+    // file_stem includes metadata hash. Thus we have a different
+    // fingerprint for every metadata hash version. This works because
+    // even if the package is fresh, we'll still link the fresh target
+    let file_stem = cx.files().file_stem(unit);
+    let kind = match *unit.target.kind() {
+        TargetKind::Lib(..) => "lib",
+        TargetKind::Bin => "bin",
+        TargetKind::Test => "integration-test",
+        TargetKind::ExampleBin | TargetKind::ExampleLib(..) => "example",
+        TargetKind::Bench => "bench",
+        TargetKind::CustomBuild => "build-script",
+    };
+    let flavor = if unit.mode.is_any_test() {
+        "test-"
+    } else if unit.mode.is_doc() {
+        "doc-"
+    } else {
+        ""
+    };
+    format!("{}{}-{}", flavor, kind, file_stem)
+}
+
+/// Parses the dep-info file coming out of rustc into a Cargo-specific format.
+///
+/// This function will parse `rustc_dep_info` as a makefile-style dep info to
+/// learn about all the files which a crate depends on. This is then
+/// re-serialized into the `cargo_dep_info` path in a Cargo-specific format.
+///
+/// The `pkg_root` argument here is the absolute path to the directory
+/// containing `Cargo.toml` for this crate that was compiled. The paths listed
+/// in the rustc dep-info file may or may not be absolute but we'll want to
+/// consider all of them relative to the `root` specified.
+///
+/// The `rustc_cwd` argument is the absolute path to the cwd of the compiler
+/// when it was invoked.
+///
+/// The serialized Cargo format will contain a list of files, all of which are
+/// relative if they're under `root`, or absolute if they're elsewhere.
+pub fn translate_dep_info(
+    rustc_dep_info: &Path,
+    cargo_dep_info: &Path,
+    pkg_root: &Path,
+    rustc_cwd: &Path,
+) -> CargoResult<()> {
+    let target = parse_rustc_dep_info(rustc_dep_info)?;
+    let deps = &target
+        .get(0)
+        .ok_or_else(|| internal("malformed dep-info format, no targets".to_string()))?
+ .1; + + let mut new_contents = Vec::new(); + for file in deps { + let absolute = rustc_cwd.join(file); + let path = absolute.strip_prefix(pkg_root).unwrap_or(&absolute); + new_contents.extend(util::path2bytes(path)?); + new_contents.push(0); + } + paths::write(cargo_dep_info, &new_contents)?; + Ok(()) +} + +pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult)>> { + let contents = paths::read(rustc_dep_info)?; + contents + .lines() + .filter_map(|l| l.find(": ").map(|i| (l, i))) + .map(|(line, pos)| { + let target = &line[..pos]; + let mut deps = line[pos + 2..].split_whitespace(); + + let mut ret = Vec::new(); + while let Some(s) = deps.next() { + let mut file = s.to_string(); + while file.ends_with('\\') { + file.pop(); + file.push(' '); + file.push_str(deps.next().ok_or_else(|| { + internal("malformed dep-info format, trailing \\".to_string()) + })?); + } + ret.push(file); + } + Ok((target.to_string(), ret)) + }) + .collect() +} diff --git a/src/cargo/core/compiler/job.rs b/src/cargo/core/compiler/job.rs new file mode 100644 index 000000000..61e979f1d --- /dev/null +++ b/src/cargo/core/compiler/job.rs @@ -0,0 +1,71 @@ +use std::fmt; + +use util::{CargoResult, Dirty, Fresh, Freshness}; +use super::job_queue::JobState; + +pub struct Job { + dirty: Work, + fresh: Work, +} + +/// Each proc should send its description before starting. +/// It should send either once or close immediately. +pub struct Work { + inner: Box FnBox<&'a JobState<'b>, CargoResult<()>> + Send>, +} + +trait FnBox { + fn call_box(self: Box, a: A) -> R; +} + +impl R> FnBox for F { + fn call_box(self: Box, a: A) -> R { + (*self)(a) + } +} + +impl Work { + pub fn new(f: F) -> Work + where + F: FnOnce(&JobState) -> CargoResult<()> + Send + 'static, + { + Work { inner: Box::new(f) } + } + + pub fn noop() -> Work { + Work::new(|_| Ok(())) + } + + pub fn call(self, tx: &JobState) -> CargoResult<()> { + self.inner.call_box(tx) + } + + pub fn then(self, next: Work) -> Work { + Work::new(move |state| { + self.call(state)?; + next.call(state) + }) + } +} + +impl Job { + /// Create a new job representing a unit of work. + pub fn new(dirty: Work, fresh: Work) -> Job { + Job { dirty, fresh } + } + + /// Consumes this job by running it, returning the result of the + /// computation. + pub fn run(self, fresh: Freshness, state: &JobState) -> CargoResult<()> { + match fresh { + Fresh => self.fresh.call(state), + Dirty => self.dirty.call(state), + } + } +} + +impl fmt::Debug for Job { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Job {{ ... }}") + } +} diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs new file mode 100644 index 000000000..235dccc93 --- /dev/null +++ b/src/cargo/core/compiler/job_queue.rs @@ -0,0 +1,478 @@ +use std::collections::hash_map::HashMap; +use std::collections::HashSet; +use std::fmt; +use std::io; +use std::mem; +use std::sync::mpsc::{channel, Receiver, Sender}; + +use crossbeam::{self, Scope}; +use jobserver::{Acquired, HelperThread}; + +use core::profiles::Profile; +use core::{PackageId, Target}; +use handle_error; +use util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder}; +use util::{Config, DependencyQueue, Dirty, Fresh, Freshness}; + +use super::job::Job; +use super::{BuildContext, CompileMode, Context, Kind, Unit}; + +/// A management structure of the entire dependency graph to compile. +/// +/// This structure is backed by the `DependencyQueue` type and manages the +/// actual compilation step of each package. 
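Stepping back to `parse_rustc_dep_info` above: makefile-style dep-info escapes spaces in paths with a trailing backslash on the split token, which is why the inner loop rejoins tokens. A self-contained sketch of that unescaping (the function name is ours):

```rust
/// Rejoin tokens that were split on an escaped space (`foo\ bar`).
fn next_path<'a, I>(tokens: &mut I) -> Option<String>
where
    I: Iterator<Item = &'a str>,
{
    let mut file = tokens.next()?.to_string();
    while file.ends_with('\\') {
        file.pop();
        file.push(' ');
        file.push_str(tokens.next()?); // a trailing `\` would be malformed
    }
    Some(file)
}

fn main() {
    let deps = r"src/my\ file.rs src/lib.rs";
    let mut tokens = deps.split_whitespace();
    assert_eq!(next_path(&mut tokens).as_deref(), Some("src/my file.rs"));
    assert_eq!(next_path(&mut tokens).as_deref(), Some("src/lib.rs"));
}
```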
Packages enqueue units of work and +/// then later on the entire graph is processed and compiled. +pub struct JobQueue<'a> { + queue: DependencyQueue, Vec<(Job, Freshness)>>, + tx: Sender>, + rx: Receiver>, + active: usize, + pending: HashMap, PendingBuild>, + compiled: HashSet<&'a PackageId>, + documented: HashSet<&'a PackageId>, + counts: HashMap<&'a PackageId, usize>, + is_release: bool, +} + +/// A helper structure for metadata about the state of a building package. +struct PendingBuild { + /// Number of jobs currently active + amt: usize, + /// Current freshness state of this package. Any dirty target within a + /// package will cause the entire package to become dirty. + fresh: Freshness, +} + +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +struct Key<'a> { + pkg: &'a PackageId, + target: &'a Target, + profile: Profile, + kind: Kind, + mode: CompileMode, +} + +pub struct JobState<'a> { + tx: Sender>, +} + +enum Message<'a> { + Run(String), + Stdout(String), + Stderr(String), + Token(io::Result), + Finish(Key<'a>, CargoResult<()>), +} + +impl<'a> JobState<'a> { + pub fn running(&self, cmd: &ProcessBuilder) { + let _ = self.tx.send(Message::Run(cmd.to_string())); + } + + pub fn stdout(&self, out: &str) { + let _ = self.tx.send(Message::Stdout(out.to_string())); + } + + pub fn stderr(&self, err: &str) { + let _ = self.tx.send(Message::Stderr(err.to_string())); + } +} + +impl<'a> JobQueue<'a> { + pub fn new<'cfg>(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a> { + let (tx, rx) = channel(); + JobQueue { + queue: DependencyQueue::new(), + tx, + rx, + active: 0, + pending: HashMap::new(), + compiled: HashSet::new(), + documented: HashSet::new(), + counts: HashMap::new(), + is_release: bcx.build_config.release, + } + } + + pub fn enqueue<'cfg>( + &mut self, + cx: &Context<'a, 'cfg>, + unit: &Unit<'a>, + job: Job, + fresh: Freshness, + ) -> CargoResult<()> { + let key = Key::new(unit); + let deps = key.dependencies(cx)?; + self.queue + .queue(Fresh, key, Vec::new(), &deps) + .push((job, fresh)); + *self.counts.entry(key.pkg).or_insert(0) += 1; + Ok(()) + } + + /// Execute all jobs necessary to build the dependency graph. + /// + /// This function will spawn off `config.jobs()` workers to build all of the + /// necessary dependencies, in order. Freshness is propagated as far as + /// possible along each dependency chain. + pub fn execute(&mut self, cx: &mut Context) -> CargoResult<()> { + let _p = profile::start("executing the job graph"); + self.queue.queue_finished(); + + // We need to give a handle to the send half of our message queue to the + // jobserver helper thread. Unfortunately though we need the handle to be + // `'static` as that's typically what's required when spawning a + // thread! + // + // To work around this we transmute the `Sender` to a static lifetime. + // we're only sending "longer living" messages and we should also + // destroy all references to the channel before this function exits as + // the destructor for the `helper` object will ensure the associated + // thread is no longer running. + // + // As a result, this `transmute` to a longer lifetime should be safe in + // practice. 
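The `transmute` justified above is needed because the `Sender` is parameterized over the borrow lifetime `'a`, while a spawned thread demands `'static`. For contrast, a sketch of the unconstrained version of the same channel pattern, with an owned message type, where no `transmute` is required (the `Msg` type is ours):

```rust
use std::sync::mpsc::channel;
use std::thread;

enum Msg {
    Token(usize),
}

fn main() {
    let (tx, rx) = channel::<Msg>();
    // An owned `Sender<Msg>` makes the closure `'static`, so it can be
    // moved into a spawned thread without any lifetime laundering.
    let helper = thread::spawn(move || {
        tx.send(Msg::Token(1)).unwrap();
    });
    match rx.recv().unwrap() {
        Msg::Token(n) => println!("acquired token {}", n),
    }
    helper.join().unwrap();
}
```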
+ let tx = self.tx.clone(); + let tx = unsafe { mem::transmute::>, Sender>>(tx) }; + let helper = cx.jobserver + .clone() + .into_helper_thread(move |token| { + drop(tx.send(Message::Token(token))); + }) + .chain_err(|| "failed to create helper thread for jobserver management")?; + + crossbeam::scope(|scope| self.drain_the_queue(cx, scope, &helper)) + } + + fn drain_the_queue( + &mut self, + cx: &mut Context, + scope: &Scope<'a>, + jobserver_helper: &HelperThread, + ) -> CargoResult<()> { + let mut tokens = Vec::new(); + let mut queue = Vec::new(); + trace!("queue: {:#?}", self.queue); + + // Iteratively execute the entire dependency graph. Each turn of the + // loop starts out by scheduling as much work as possible (up to the + // maximum number of parallel jobs we have tokens for). A local queue + // is maintained separately from the main dependency queue as one + // dequeue may actually dequeue quite a bit of work (e.g. 10 binaries + // in one project). + // + // After a job has finished we update our internal state if it was + // successful and otherwise wait for pending work to finish if it failed + // and then immediately return. + let mut error = None; + loop { + // Dequeue as much work as we can, learning about everything + // possible that can run. Note that this is also the point where we + // start requesting job tokens. Each job after the first needs to + // request a token. + while let Some((fresh, key, jobs)) = self.queue.dequeue() { + let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| f.combine(fresh)); + self.pending.insert( + key, + PendingBuild { + amt: jobs.len(), + fresh: total_fresh, + }, + ); + for (job, f) in jobs { + queue.push((key, job, f.combine(fresh))); + if self.active + queue.len() > 0 { + jobserver_helper.request_token(); + } + } + } + + // Now that we've learned of all possible work that we can execute + // try to spawn it so long as we've got a jobserver token which says + // we're able to perform some parallel work. + while error.is_none() && self.active < tokens.len() + 1 && !queue.is_empty() { + let (key, job, fresh) = queue.remove(0); + self.run(key, fresh, job, cx.bcx.config, scope)?; + } + + // If after all that we're not actually running anything then we're + // done! + if self.active == 0 { + break; + } + + // And finally, before we block waiting for the next event, drop any + // excess tokens we may have accidentally acquired. Due to how our + // jobserver interface is architected we may acquire a token that we + // don't actually use, and if this happens just relinquish it back + // to the jobserver itself. 
+ tokens.truncate(self.active - 1); + + match self.rx.recv().unwrap() { + Message::Run(cmd) => { + cx.bcx + .config + .shell() + .verbose(|c| c.status("Running", &cmd))?; + } + Message::Stdout(out) => { + if cx.bcx.config.extra_verbose() { + println!("{}", out); + } + } + Message::Stderr(err) => { + if cx.bcx.config.extra_verbose() { + writeln!(cx.bcx.config.shell().err(), "{}", err)?; + } + } + Message::Finish(key, result) => { + info!("end: {:?}", key); + self.active -= 1; + if self.active > 0 { + assert!(!tokens.is_empty()); + drop(tokens.pop()); + } + match result { + Ok(()) => self.finish(key, cx)?, + Err(e) => { + let msg = "The following warnings were emitted during compilation:"; + self.emit_warnings(Some(msg), &key, cx)?; + + if self.active > 0 { + error = Some(format_err!("build failed")); + handle_error(e, &mut *cx.bcx.config.shell()); + cx.bcx.config.shell().warn( + "build failed, waiting for other \ + jobs to finish...", + )?; + } else { + error = Some(e); + } + } + } + } + Message::Token(acquired_token) => { + tokens.push(acquired_token.chain_err(|| "failed to acquire jobserver token")?); + } + } + } + + let build_type = if self.is_release { "release" } else { "dev" }; + // NOTE: This may be a bit inaccurate, since this may not display the + // profile for what was actually built. Profile overrides can change + // these settings, and in some cases different targets are built with + // different profiles. To be accurate, it would need to collect a + // list of Units built, and maybe display a list of the different + // profiles used. However, to keep it simple and compatible with old + // behavior, we just display what the base profile is. + let profile = cx.bcx.profiles.base_profile(self.is_release); + let mut opt_type = String::from(if profile.opt_level.as_str() == "0" { + "unoptimized" + } else { + "optimized" + }); + if profile.debuginfo.is_some() { + opt_type += " + debuginfo"; + } + + let time_elapsed = { + use std::fmt::Write; + + let duration = cx.bcx.config.creation_time().elapsed(); + let mut s = String::new(); + let secs = duration.as_secs(); + + if secs >= 60 { + // We can safely unwrap, as writing to a `String` never errors + write!(s, "{}m ", secs / 60).unwrap(); + }; + + // We can safely unwrap, as writing to a `String` never errors + write!( + s, + "{}.{:02}s", + secs % 60, + duration.subsec_nanos() / 10_000_000 + ).unwrap(); + + s + }; + + if self.queue.is_empty() { + let message = format!( + "{} [{}] target(s) in {}", + build_type, opt_type, time_elapsed + ); + cx.bcx.config.shell().status("Finished", message)?; + Ok(()) + } else if let Some(e) = error { + Err(e) + } else { + debug!("queue: {:#?}", self.queue); + Err(internal("finished with jobs still left in the queue")) + } + } + + /// Executes a job in the `scope` given, pushing the spawned thread's + /// handled onto `threads`. 
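A quick worked example of the elapsed-time formatting assembled in `drain_the_queue` above (extracting it as a free function is our illustration, not Cargo's API): minutes appear only when present, and seconds carry two centisecond digits.

```rust
use std::fmt::Write;
use std::time::Duration;

fn format_elapsed(duration: Duration) -> String {
    let mut s = String::new();
    let secs = duration.as_secs();
    if secs >= 60 {
        write!(s, "{}m ", secs / 60).unwrap();
    }
    write!(s, "{}.{:02}s", secs % 60, duration.subsec_nanos() / 10_000_000).unwrap();
    s
}

fn main() {
    assert_eq!(format_elapsed(Duration::new(83, 450_000_000)), "1m 23.45s");
    assert_eq!(format_elapsed(Duration::new(7, 50_000_000)), "7.05s");
}
```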
+ fn run( + &mut self, + key: Key<'a>, + fresh: Freshness, + job: Job, + config: &Config, + scope: &Scope<'a>, + ) -> CargoResult<()> { + info!("start: {:?}", key); + + self.active += 1; + *self.counts.get_mut(key.pkg).unwrap() -= 1; + + let my_tx = self.tx.clone(); + let doit = move || { + let res = job.run(fresh, &JobState { tx: my_tx.clone() }); + my_tx.send(Message::Finish(key, res)).unwrap(); + }; + match fresh { + Freshness::Fresh => doit(), + Freshness::Dirty => { + scope.spawn(doit); + } + } + + // Print out some nice progress information + self.note_working_on(config, &key, fresh)?; + + Ok(()) + } + + fn emit_warnings(&self, msg: Option<&str>, key: &Key<'a>, cx: &mut Context) -> CargoResult<()> { + let output = cx.build_state.outputs.lock().unwrap(); + let bcx = &mut cx.bcx; + if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) { + if let Some(msg) = msg { + if !output.warnings.is_empty() { + writeln!(bcx.config.shell().err(), "{}\n", msg)?; + } + } + + for warning in output.warnings.iter() { + bcx.config.shell().warn(warning)?; + } + + if !output.warnings.is_empty() && msg.is_some() { + // Output an empty line. + writeln!(bcx.config.shell().err(), "")?; + } + } + + Ok(()) + } + + fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> { + if key.mode.is_run_custom_build() && cx.bcx.show_warnings(key.pkg) { + self.emit_warnings(None, &key, cx)?; + } + + let state = self.pending.get_mut(&key).unwrap(); + state.amt -= 1; + if state.amt == 0 { + self.queue.finish(&key, state.fresh); + } + Ok(()) + } + + // This isn't super trivial because we don't want to print loads and + // loads of information to the console, but we also want to produce a + // faithful representation of what's happening. This is somewhat nuanced + // as a package can start compiling *very* early on because of custom + // build commands and such. + // + // In general, we try to print "Compiling" for the first nontrivial task + // run for a package, regardless of when that is. We then don't print + // out any more information for a package after we've printed it once. 
+ fn note_working_on( + &mut self, + config: &Config, + key: &Key<'a>, + fresh: Freshness, + ) -> CargoResult<()> { + if (self.compiled.contains(key.pkg) && !key.mode.is_doc()) + || (self.documented.contains(key.pkg) && key.mode.is_doc()) + { + return Ok(()); + } + + match fresh { + // Any dirty stage which runs at least one command gets printed as + // being a compiled package + Dirty => { + if key.mode.is_doc() { + // Skip Doctest + if !key.mode.is_any_test() { + self.documented.insert(key.pkg); + config.shell().status("Documenting", key.pkg)?; + } + } else { + self.compiled.insert(key.pkg); + if key.mode.is_check() { + config.shell().status("Checking", key.pkg)?; + } else { + config.shell().status("Compiling", key.pkg)?; + } + } + } + Fresh if self.counts[key.pkg] == 0 => { + self.compiled.insert(key.pkg); + config.shell().verbose(|c| c.status("Fresh", key.pkg))?; + } + Fresh => {} + } + Ok(()) + } +} + +impl<'a> Key<'a> { + fn new(unit: &Unit<'a>) -> Key<'a> { + Key { + pkg: unit.pkg.package_id(), + target: unit.target, + profile: unit.profile, + kind: unit.kind, + mode: unit.mode, + } + } + + fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) -> CargoResult>> { + let unit = Unit { + pkg: cx.bcx.get_package(self.pkg)?, + target: self.target, + profile: self.profile, + kind: self.kind, + mode: self.mode, + }; + let targets = cx.dep_targets(&unit); + Ok(targets + .iter() + .filter_map(|unit| { + // Binaries aren't actually needed to *compile* tests, just to run + // them, so we don't include this dependency edge in the job graph. + if self.target.is_test() && unit.target.is_bin() { + None + } else { + Some(Key::new(unit)) + } + }) + .collect()) + } +} + +impl<'a> fmt::Debug for Key<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{} => {}/{} => {:?}", + self.pkg, self.target, self.profile, self.kind + ) + } +} diff --git a/src/cargo/core/compiler/layout.rs b/src/cargo/core/compiler/layout.rs new file mode 100644 index 000000000..c29b55768 --- /dev/null +++ b/src/cargo/core/compiler/layout.rs @@ -0,0 +1,205 @@ +//! Management of the directory layout of a build +//! +//! The directory layout is a little tricky at times, hence a separate file to +//! house this logic. The current layout looks like this: +//! +//! ```ignore +//! # This is the root directory for all output, the top-level package +//! # places all of its output here. +//! target/ +//! +//! # This is the root directory for all output of *dependencies* +//! deps/ +//! +//! # Root directory for all compiled examples +//! examples/ +//! +//! # This is the location at which the output of all custom build +//! # commands are rooted +//! build/ +//! +//! # Each package gets its own directory where its build script and +//! # script output are placed +//! $pkg1/ +//! $pkg2/ +//! $pkg3/ +//! +//! # Each directory package has a `out` directory where output +//! # is placed. +//! out/ +//! +//! # This is the location at which the output of all old custom build +//! # commands are rooted +//! native/ +//! +//! # Each package gets its own directory for where its output is +//! # placed. We can't track exactly what's getting put in here, so +//! # we just assume that all relevant output is in these +//! # directories. +//! $pkg1/ +//! $pkg2/ +//! $pkg3/ +//! +//! # Directory used to store incremental data for the compiler (when +//! # incremental is enabled. +//! incremental/ +//! +//! # Hidden directory that holds all of the fingerprint files for all +//! # packages +//! .fingerprint/ +//! 
``` + +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +use core::Workspace; +use util::{CargoResult, Config, FileLock, Filesystem}; + +/// Contains the paths of all target output locations. +/// +/// See module docs for more information. +pub struct Layout { + root: PathBuf, + deps: PathBuf, + native: PathBuf, + build: PathBuf, + incremental: PathBuf, + fingerprint: PathBuf, + examples: PathBuf, + /// The lockfile for a build, will be unlocked when this struct is `drop`ped. + _lock: FileLock, +} + +pub fn is_bad_artifact_name(name: &str) -> bool { + ["deps", "examples", "build", "native", "incremental"] + .iter() + .any(|&reserved| reserved == name) +} + +impl Layout { + /// Calculate the paths for build output, lock the build directory, and return as a Layout. + /// + /// This function will block if the directory is already locked. + /// + /// Differs from `at` in that this calculates the root path from the workspace target directory, + /// adding the target triple and the profile (debug, release, ...). + pub fn new(ws: &Workspace, triple: Option<&str>, dest: &str) -> CargoResult { + let mut path = ws.target_dir(); + // Flexible target specifications often point at filenames, so interpret + // the target triple as a Path and then just use the file stem as the + // component for the directory name. + if let Some(triple) = triple { + path.push(Path::new(triple) + .file_stem() + .ok_or_else(|| format_err!("invalid target"))?); + } + path.push(dest); + Layout::at(ws.config(), path) + } + + /// Calculate the paths for build output, lock the build directory, and return as a Layout. + /// + /// This function will block if the directory is already locked. + pub fn at(config: &Config, root: Filesystem) -> CargoResult { + // For now we don't do any more finer-grained locking on the artifact + // directory, so just lock the entire thing for the duration of this + // compile. + let lock = root.open_rw(".cargo-lock", config, "build directory")?; + let root = root.into_path_unlocked(); + + Ok(Layout { + deps: root.join("deps"), + native: root.join("native"), + build: root.join("build"), + incremental: root.join("incremental"), + fingerprint: root.join(".fingerprint"), + examples: root.join("examples"), + root, + _lock: lock, + }) + } + + #[cfg(not(target_os = "macos"))] + fn exclude_from_backups(&self, _: &Path) {} + + #[cfg(target_os = "macos")] + /// Marks files or directories as excluded from Time Machine on macOS + /// + /// This is recommended to prevent derived/temporary files from bloating backups. + fn exclude_from_backups(&self, path: &Path) { + use std::ptr; + use core_foundation::{number, string, url}; + use core_foundation::base::TCFType; + + // For compatibility with 10.7 a string is used instead of global kCFURLIsExcludedFromBackupKey + let is_excluded_key: Result = "NSURLIsExcludedFromBackupKey".parse(); + match (url::CFURL::from_path(path, false), is_excluded_key) { + (Some(path), Ok(is_excluded_key)) => unsafe { + url::CFURLSetResourcePropertyForKey( + path.as_concrete_TypeRef(), + is_excluded_key.as_concrete_TypeRef(), + number::kCFBooleanTrue as *const _, + ptr::null_mut(), + ); + }, + // Errors are ignored, since it's an optional feature and failure + // doesn't prevent Cargo from working + _ => {} + } + } + + /// Make sure all directories stored in the Layout exist on the filesystem. 
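To make the triple handling in `Layout::new` above concrete, here is a standalone sketch of the root-path computation (the helper name is ours): the root is `<target-dir>/[<triple file-stem>/]<dest>`, so a custom target spec file like `my-target.json` maps to a `my-target` directory.

```rust
use std::path::{Path, PathBuf};

fn layout_root(target_dir: PathBuf, triple: Option<&str>, dest: &str) -> PathBuf {
    let mut path = target_dir;
    if let Some(triple) = triple {
        // A target spec may be a filename; keep only its stem.
        path.push(Path::new(triple).file_stem().expect("invalid target"));
    }
    path.push(dest);
    path
}

fn main() {
    let root = layout_root(PathBuf::from("target"), Some("my-target.json"), "debug");
    assert_eq!(root, Path::new("target").join("my-target").join("debug"));
}
```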
+ pub fn prepare(&mut self) -> io::Result<()> { + if fs::metadata(&self.root).is_err() { + fs::create_dir_all(&self.root)?; + } + + self.exclude_from_backups(&self.root); + + mkdir(&self.deps)?; + mkdir(&self.native)?; + mkdir(&self.incremental)?; + mkdir(&self.fingerprint)?; + mkdir(&self.examples)?; + mkdir(&self.build)?; + + return Ok(()); + + fn mkdir(dir: &Path) -> io::Result<()> { + if fs::metadata(&dir).is_err() { + fs::create_dir(dir)?; + } + Ok(()) + } + } + + /// Fetch the root path. + pub fn dest(&self) -> &Path { + &self.root + } + /// Fetch the deps path. + pub fn deps(&self) -> &Path { + &self.deps + } + /// Fetch the examples path. + pub fn examples(&self) -> &Path { + &self.examples + } + /// Fetch the root path. + pub fn root(&self) -> &Path { + &self.root + } + /// Fetch the incremental path. + pub fn incremental(&self) -> &Path { + &self.incremental + } + /// Fetch the fingerprint path. + pub fn fingerprint(&self) -> &Path { + &self.fingerprint + } + /// Fetch the build path. + pub fn build(&self) -> &Path { + &self.build + } +} diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs new file mode 100644 index 000000000..8b503aaae --- /dev/null +++ b/src/cargo/core/compiler/mod.rs @@ -0,0 +1,940 @@ +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fs; +use std::io::{self, Write}; +use std::path::{self, Path, PathBuf}; +use std::sync::Arc; + +use same_file::is_same_file; +use serde_json; + +use core::profiles::{Lto, Profile}; +use core::shell::ColorChoice; +use core::{Feature, PackageId, Target}; +use util::errors::{CargoResult, CargoResultExt, Internal}; +use util::paths; +use util::{self, machine_message, Freshness, ProcessBuilder}; +use util::{internal, join_paths, profile}; + +use self::job::{Job, Work}; +use self::job_queue::JobQueue; + +use self::output_depinfo::output_depinfo; + +pub use self::build_context::{BuildContext, FileFlavor, TargetConfig, TargetInfo}; +pub use self::build_config::{BuildConfig, CompileMode, MessageFormat}; +pub use self::compilation::Compilation; +pub use self::context::{Context, Unit}; +pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts}; +pub use self::layout::is_bad_artifact_name; + +mod build_config; +mod build_context; +mod compilation; +mod context; +mod custom_build; +mod fingerprint; +mod job; +mod job_queue; +mod layout; +mod output_depinfo; + +/// Whether an object is for the host arch, or the target arch. +/// +/// These will be the same unless cross-compiling. +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord)] +pub enum Kind { + Host, + Target, +} + +/// A glorified callback for executing calls to rustc. Rather than calling rustc +/// directly, we'll use an Executor, giving clients an opportunity to intercept +/// the build calls. +pub trait Executor: Send + Sync + 'static { + /// Called after a rustc process invocation is prepared up-front for a given + /// unit of work (may still be modified for runtime-known dependencies, when + /// the work is actually executed). + fn init(&self, _cx: &Context, _unit: &Unit) {} + + /// In case of an `Err`, Cargo will not continue with the build process for + /// this package. 
+ fn exec(&self, cmd: ProcessBuilder, _id: &PackageId, _target: &Target) -> CargoResult<()> { + cmd.exec()?; + Ok(()) + } + + fn exec_json( + &self, + cmd: ProcessBuilder, + _id: &PackageId, + _target: &Target, + handle_stdout: &mut FnMut(&str) -> CargoResult<()>, + handle_stderr: &mut FnMut(&str) -> CargoResult<()>, + ) -> CargoResult<()> { + cmd.exec_with_streaming(handle_stdout, handle_stderr, false)?; + Ok(()) + } + + /// Queried when queuing each unit of work. If it returns true, then the + /// unit will always be rebuilt, independent of whether it needs to be. + fn force_rebuild(&self, _unit: &Unit) -> bool { + false + } +} + +/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's +/// default behaviour. +#[derive(Copy, Clone)] +pub struct DefaultExecutor; + +impl Executor for DefaultExecutor {} + +fn compile<'a, 'cfg: 'a>( + cx: &mut Context<'a, 'cfg>, + jobs: &mut JobQueue<'a>, + unit: &Unit<'a>, + exec: &Arc, +) -> CargoResult<()> { + let bcx = cx.bcx; + if !cx.compiled.insert(*unit) { + return Ok(()); + } + + // Build up the work to be done to compile this unit, enqueuing it once + // we've got everything constructed. + let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name())); + fingerprint::prepare_init(cx, unit)?; + cx.links.validate(bcx.resolve, unit)?; + + let (dirty, fresh, freshness) = if unit.mode.is_run_custom_build() { + custom_build::prepare(cx, unit)? + } else if unit.mode == CompileMode::Doctest { + // we run these targets later, so this is just a noop for now + (Work::noop(), Work::noop(), Freshness::Fresh) + } else { + let (mut freshness, dirty, fresh) = fingerprint::prepare_target(cx, unit)?; + let work = if unit.mode.is_doc() { + rustdoc(cx, unit)? + } else { + rustc(cx, unit, exec)? + }; + // Need to link targets on both the dirty and fresh + let dirty = work.then(link_targets(cx, unit, false)?).then(dirty); + let fresh = link_targets(cx, unit, true)?.then(fresh); + + if exec.force_rebuild(unit) { + freshness = Freshness::Dirty; + } + + (dirty, fresh, freshness) + }; + jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?; + drop(p); + + // Be sure to compile all dependencies of this target as well. + for unit in cx.dep_targets(unit).iter() { + compile(cx, jobs, unit, exec)?; + } + + Ok(()) +} + +fn rustc<'a, 'cfg>( + mut cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, + exec: &Arc, +) -> CargoResult { + let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?; + + let name = unit.pkg.name().to_string(); + + // If this is an upstream dep we don't want warnings from, turn off all + // lints. + if !cx.bcx.show_warnings(unit.pkg.package_id()) { + rustc.arg("--cap-lints").arg("allow"); + + // If this is an upstream dep but we *do* want warnings, make sure that they + // don't fail compilation. + } else if !unit.pkg.package_id().source_id().is_path() { + rustc.arg("--cap-lints").arg("warn"); + } + + let outputs = cx.outputs(unit)?; + let root = cx.files().out_dir(unit); + let kind = unit.kind; + + // Prepare the native lib state (extra -L and -l flags) + let build_state = cx.build_state.clone(); + let current_id = unit.pkg.package_id().clone(); + let build_deps = load_build_deps(cx, unit); + + // If we are a binary and the package also contains a library, then we + // don't pass the `-l` flags. 
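The two `--cap-lints` branches above form a small policy table; as a sketch of the decision alone (the free function is our extraction):

```rust
/// `allow` silences lints entirely for dependencies whose warnings are
/// hidden; `warn` keeps warnings visible but non-fatal for other non-path
/// (registry/git) crates; local path crates get no cap.
fn cap_lints(show_warnings: bool, is_path_source: bool) -> Option<&'static str> {
    if !show_warnings {
        Some("allow")
    } else if !is_path_source {
        Some("warn")
    } else {
        None
    }
}

fn main() {
    assert_eq!(cap_lints(false, false), Some("allow"));
    assert_eq!(cap_lints(true, false), Some("warn"));
    assert_eq!(cap_lints(true, true), None);
}
```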
+ let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib()); + let do_rename = unit.target.allows_underscores() && !unit.mode.is_any_test(); + let real_name = unit.target.name().to_string(); + let crate_name = unit.target.crate_name(); + + // XXX(Rely on target_filenames iterator as source of truth rather than rederiving filestem) + let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() { + root.join(&crate_name) + } else { + root.join(&cx.files().file_stem(unit)) + }.with_extension("d"); + let dep_info_loc = fingerprint::dep_info_loc(&mut cx, unit); + + rustc.args(&cx.bcx.rustflags_args(unit)?); + let json_messages = cx.bcx.build_config.json_messages(); + let package_id = unit.pkg.package_id().clone(); + let target = unit.target.clone(); + + exec.init(cx, unit); + let exec = exec.clone(); + + let root_output = cx.files().target_root().to_path_buf(); + let pkg_root = unit.pkg.root().to_path_buf(); + let cwd = rustc + .get_cwd() + .unwrap_or_else(|| cx.bcx.config.cwd()) + .to_path_buf(); + + return Ok(Work::new(move |state| { + // Only at runtime have we discovered what the extra -L and -l + // arguments are for native libraries, so we process those here. We + // also need to be sure to add any -L paths for our plugins to the + // dynamic library load path as a plugin's dynamic library may be + // located somewhere in there. + // Finally, if custom environment variables have been produced by + // previous build scripts, we include them in the rustc invocation. + if let Some(build_deps) = build_deps { + let build_state = build_state.outputs.lock().unwrap(); + add_native_deps( + &mut rustc, + &build_state, + &build_deps, + pass_l_flag, + ¤t_id, + )?; + add_plugin_deps(&mut rustc, &build_state, &build_deps, &root_output)?; + add_custom_env(&mut rustc, &build_state, ¤t_id, kind)?; + } + + for output in outputs.iter() { + // If there is both an rmeta and rlib, rustc will prefer to use the + // rlib, even if it is older. Therefore, we must delete the rlib to + // force using the new rmeta. 
+ if output.path.extension() == Some(OsStr::new("rmeta")) { + let dst = root.join(&output.path).with_extension("rlib"); + if dst.exists() { + paths::remove_file(&dst)?; + } + } + } + + state.running(&rustc); + if json_messages { + exec.exec_json( + rustc, + &package_id, + &target, + &mut |line| { + if !line.is_empty() { + Err(internal(&format!( + "compiler stdout is not empty: `{}`", + line + ))) + } else { + Ok(()) + } + }, + &mut |line| { + // stderr from rustc can have a mix of JSON and non-JSON output + if line.starts_with('{') { + // Handle JSON lines + let compiler_message = serde_json::from_str(line).map_err(|_| { + internal(&format!("compiler produced invalid json: `{}`", line)) + })?; + + machine_message::emit(&machine_message::FromCompiler { + package_id: &package_id, + target: &target, + message: compiler_message, + }); + } else { + // Forward non-JSON to stderr + writeln!(io::stderr(), "{}", line)?; + } + Ok(()) + }, + ).chain_err(|| format!("Could not compile `{}`.", name))?; + } else { + exec.exec(rustc, &package_id, &target) + .map_err(Internal::new) + .chain_err(|| format!("Could not compile `{}`.", name))?; + } + + if do_rename && real_name != crate_name { + let dst = &outputs[0].path; + let src = dst.with_file_name( + dst.file_name() + .unwrap() + .to_str() + .unwrap() + .replace(&real_name, &crate_name), + ); + if src.exists() && src.file_name() != dst.file_name() { + fs::rename(&src, &dst) + .chain_err(|| internal(format!("could not rename crate {:?}", src)))?; + } + } + + if rustc_dep_info_loc.exists() { + fingerprint::translate_dep_info(&rustc_dep_info_loc, &dep_info_loc, &pkg_root, &cwd) + .chain_err(|| { + internal(format!( + "could not parse/generate dep info at: {}", + rustc_dep_info_loc.display() + )) + })?; + } + + Ok(()) + })); + + // Add all relevant -L and -l flags from dependencies (now calculated and + // present in `state`) to the command provided + fn add_native_deps( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + build_scripts: &BuildScripts, + pass_l_flag: bool, + current_id: &PackageId, + ) -> CargoResult<()> { + for key in build_scripts.to_link.iter() { + let output = build_state.get(key).ok_or_else(|| { + internal(format!( + "couldn't find build state for {}/{:?}", + key.0, key.1 + )) + })?; + for path in output.library_paths.iter() { + rustc.arg("-L").arg(path); + } + if key.0 == *current_id { + for cfg in &output.cfgs { + rustc.arg("--cfg").arg(cfg); + } + if pass_l_flag { + for name in output.library_links.iter() { + rustc.arg("-l").arg(name); + } + } + } + } + Ok(()) + } + + // Add all custom environment variables present in `state` (after they've + // been put there by one of the `build_scripts`) to the command provided. + fn add_custom_env( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + current_id: &PackageId, + kind: Kind, + ) -> CargoResult<()> { + let key = (current_id.clone(), kind); + if let Some(output) = build_state.get(&key) { + for &(ref name, ref value) in output.env.iter() { + rustc.env(name, value); + } + } + Ok(()) + } +} + +/// Link the compiled target (often of form `foo-{metadata_hash}`) to the +/// final target. 
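One detail behind the `do_rename` block above: rustc crate names cannot contain hyphens, so a target named `my-bin` compiles under the crate name `my_bin`, and the output file is renamed back to the real target name afterwards. A sketch of the name mapping (the helper is illustrative):

```rust
/// Mirror of the hyphen-to-underscore mapping `Target::crate_name` applies.
fn crate_name(target_name: &str) -> String {
    target_name.replace('-', "_")
}

fn main() {
    assert_eq!(crate_name("my-bin"), "my_bin");
}
```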
This must happen during both "Fresh" and "Compile"
+fn link_targets<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+ fresh: bool,
+) -> CargoResult<Work> {
+ let bcx = cx.bcx;
+ let outputs = cx.outputs(unit)?;
+ let export_dir = cx.files().export_dir(unit);
+ let package_id = unit.pkg.package_id().clone();
+ let target = unit.target.clone();
+ let profile = unit.profile;
+ let unit_mode = unit.mode;
+ let features = bcx.resolve
+ .features_sorted(&package_id)
+ .into_iter()
+ .map(|s| s.to_owned())
+ .collect();
+ let json_messages = bcx.build_config.json_messages();
+
+ Ok(Work::new(move |_| {
+ // If we're a "root crate", e.g. the target of this compilation, then we
+ // hard link our outputs out of the `deps` directory into the directory
+ // above. This means that `cargo build` will produce binaries in
+ // `target/debug` which one probably expects.
+ let mut destinations = vec![];
+ for output in outputs.iter() {
+ let src = &output.path;
+ // This may have been a `cargo rustc` command which changes the
+ // output, so the source may not actually exist.
+ if !src.exists() {
+ continue;
+ }
+ let dst = match output.hardlink.as_ref() {
+ Some(dst) => dst,
+ None => {
+ destinations.push(src.display().to_string());
+ continue;
+ }
+ };
+ destinations.push(dst.display().to_string());
+ hardlink_or_copy(src, dst)?;
+ if let Some(ref path) = export_dir {
+ if !path.exists() {
+ fs::create_dir_all(path)?;
+ }
+
+ hardlink_or_copy(src, &path.join(dst.file_name().unwrap()))?;
+ }
+ }
+
+ if json_messages {
+ let art_profile = machine_message::ArtifactProfile {
+ opt_level: profile.opt_level.as_str(),
+ debuginfo: profile.debuginfo,
+ debug_assertions: profile.debug_assertions,
+ overflow_checks: profile.overflow_checks,
+ test: unit_mode.is_any_test(),
+ };
+
+ machine_message::emit(&machine_message::Artifact {
+ package_id: &package_id,
+ target: &target,
+ profile: art_profile,
+ features,
+ filenames: destinations,
+ fresh,
+ });
+ }
+ Ok(())
+ }))
+}
+
+fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> {
+ debug!("linking {} to {}", src.display(), dst.display());
+ if is_same_file(src, dst).unwrap_or(false) {
+ return Ok(());
+ }
+ if dst.exists() {
+ paths::remove_file(&dst)?;
+ }
+
+ let link_result = if src.is_dir() {
+ #[cfg(target_os = "redox")]
+ use std::os::redox::fs::symlink;
+ #[cfg(unix)]
+ use std::os::unix::fs::symlink;
+ #[cfg(windows)]
+ use std::os::windows::fs::symlink_dir as symlink;
+
+ let dst_dir = dst.parent().unwrap();
+ let src = if src.starts_with(dst_dir) {
+ src.strip_prefix(dst_dir).unwrap()
+ } else {
+ src
+ };
+ symlink(src, dst)
+ } else {
+ fs::hard_link(src, dst)
+ };
+ link_result
+ .or_else(|err| {
+ debug!("link failed {}. falling back to fs::copy", err);
+ fs::copy(src, dst).map(|_| ())
+ })
+ .chain_err(|| {
+ format!(
+ "failed to link or copy `{}` to `{}`",
+ src.display(),
+ dst.display()
+ )
+ })?;
+ Ok(())
+}
+
+fn load_build_deps(cx: &Context, unit: &Unit) -> Option<Arc<BuildScripts>> {
+ cx.build_scripts.get(unit).cloned()
+}
+
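+// Illustrative sketch (not upstream code; `out` and `final_dst` are
+// hypothetical paths): `hardlink_or_copy` above first attempts a hard link
+// (a symlink for directories) and only falls back to a full copy when
+// linking fails, e.g. across filesystems:
+//
+// let out = Path::new("target/debug/deps/foo-0123456789abcdef");
+// let final_dst = Path::new("target/debug/foo");
+// hardlink_or_copy(out, final_dst)?; // link if possible, else fs::copy
+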
+// For all plugin dependencies, add their -L paths (now calculated and
+// present in `state`) to the dynamic library load path for the command to
+// execute.
+fn add_plugin_deps(
+ rustc: &mut ProcessBuilder,
+ build_state: &BuildMap,
+ build_scripts: &BuildScripts,
+ root_output: &PathBuf,
+) -> CargoResult<()> {
+ let var = util::dylib_path_envvar();
+ let search_path = rustc.get_env(var).unwrap_or_default();
+ let mut search_path = env::split_paths(&search_path).collect::<Vec<_>>();
+ for id in build_scripts.plugins.iter() {
+ let key = (id.clone(), Kind::Host);
+ let output = build_state
+ .get(&key)
+ .ok_or_else(|| internal(format!("couldn't find libs for plugin dep {}", id)))?;
+ search_path.append(&mut filter_dynamic_search_path(
+ output.library_paths.iter(),
+ root_output,
+ ));
+ }
+ let search_path = join_paths(&search_path, var)?;
+ rustc.env(var, &search_path);
+ Ok(())
+}
+
+// Determine paths to add to the dynamic search path from -L entries
+//
+// Strip off prefixes like "native=" or "framework=" and filter out directories
+// *not* inside our output directory since they are likely spurious and can cause
+// clashes with system shared libraries (issue #3366).
+fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec<PathBuf>
+where
+ I: Iterator<Item = &'a PathBuf>,
+{
+ let mut search_path = vec![];
+ for dir in paths {
+ let dir = match dir.to_str() {
+ Some(s) => {
+ let mut parts = s.splitn(2, '=');
+ match (parts.next(), parts.next()) {
+ (Some("native"), Some(path))
+ | (Some("crate"), Some(path))
+ | (Some("dependency"), Some(path))
+ | (Some("framework"), Some(path))
+ | (Some("all"), Some(path)) => path.into(),
+ _ => dir.clone(),
+ }
+ }
+ None => dir.clone(),
+ };
+ if dir.starts_with(&root_output) {
+ search_path.push(dir);
+ } else {
+ debug!(
+ "Not including path {} in runtime library search path because it is \
+ outside target root {}",
+ dir.display(),
+ root_output.display()
+ );
+ }
+ }
+ search_path
+}
+
+fn prepare_rustc<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ crate_types: &[&str],
+ unit: &Unit<'a>,
+) -> CargoResult<ProcessBuilder> {
+ let mut base = cx.compilation.rustc_process(unit.pkg)?;
+ base.inherit_jobserver(&cx.jobserver);
+ build_base_args(cx, &mut base, unit, crate_types)?;
+ build_deps_args(&mut base, cx, unit)?;
+ Ok(base)
+}
+
+fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<Work> {
+ let bcx = cx.bcx;
+ let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg)?;
+ rustdoc.inherit_jobserver(&cx.jobserver);
+ rustdoc.arg("--crate-name").arg(&unit.target.crate_name());
+ add_path_args(&cx.bcx, unit, &mut rustdoc);
+
+ if unit.kind != Kind::Host {
+ if let Some(ref target) = bcx.build_config.requested_target {
+ rustdoc.arg("--target").arg(target);
+ }
+ }
+
+ let doc_dir = cx.files().out_dir(unit);
+
+ // Create the documentation directory ahead of time as rustdoc currently has
+ // a bug where concurrent invocations will race to create this directory if
+ // it doesn't already exist.
+ fs::create_dir_all(&doc_dir)?;
+
+ rustdoc.arg("-o").arg(doc_dir);
+
+ for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) {
+ rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
+ }
+
+ let manifest = unit.pkg.manifest();
+
+ if manifest.features().is_enabled(Feature::edition()) {
+ rustdoc.arg("-Zunstable-options");
+ rustdoc.arg(format!("--edition={}", &manifest.edition()));
+ }
+
+ if let Some(ref args) = bcx.extra_args_for(unit) {
+ rustdoc.args(args);
+ }
+
+ build_deps_args(&mut rustdoc, cx, unit)?;
+
+ rustdoc.args(&bcx.rustdocflags_args(unit)?);
+
+ let name = unit.pkg.name().to_string();
+ let build_state = cx.build_state.clone();
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+
+ Ok(Work::new(move |state| {
+ if let Some(output) = build_state.outputs.lock().unwrap().get(&key) {
+ for cfg in output.cfgs.iter() {
+ rustdoc.arg("--cfg").arg(cfg);
+ }
+ for &(ref name, ref value) in output.env.iter() {
+ rustdoc.env(name, value);
+ }
+ }
+ state.running(&rustdoc);
+ rustdoc
+ .exec()
+ .chain_err(|| format!("Could not document `{}`.", name))?;
+ Ok(())
+ }))
+}
+
+// The path that we pass to rustc is actually fairly important because it will
+// show up in error messages (important for readability), debug information
+// (important for caching), etc. As a result we need to be pretty careful how we
+// actually invoke rustc.
+//
+// In general users don't expect `cargo build` to cause rebuilds if you change
+// directories. That holds whether you just change directories within the
+// project or literally move the whole project wholesale to a new directory. As a
+// result we mostly don't factor in `cwd` to this calculation. Instead we try to
+// track the workspace as much as possible and we update the current directory
+// of rustc/rustdoc where appropriate.
+//
+// The first returned value here is the argument to pass to rustc, and the
+// second is the cwd that rustc should operate in.
+fn path_args(bcx: &BuildContext, unit: &Unit) -> (PathBuf, PathBuf) {
+ let ws_root = bcx.ws.root();
+ let src = unit.target.src_path();
+ assert!(src.is_absolute());
+ match src.strip_prefix(ws_root) {
+ Ok(path) => (path.to_path_buf(), ws_root.to_path_buf()),
+ Err(_) => (src.to_path_buf(), unit.pkg.root().to_path_buf()),
+ }
+}
+
+fn add_path_args(bcx: &BuildContext, unit: &Unit, cmd: &mut ProcessBuilder) {
+ let (arg, cwd) = path_args(bcx, unit);
+ cmd.arg(arg);
+ cmd.cwd(cwd);
+}
+
+fn build_base_args<'a, 'cfg>(
+ cx: &mut Context<'a, 'cfg>,
+ cmd: &mut ProcessBuilder,
+ unit: &Unit<'a>,
+ crate_types: &[&str],
+) -> CargoResult<()> {
+ assert!(!unit.mode.is_run_custom_build());
+
+ let bcx = cx.bcx;
+ let Profile {
+ ref opt_level,
+ ref lto,
+ codegen_units,
+ debuginfo,
+ debug_assertions,
+ overflow_checks,
+ rpath,
+ ref panic,
+ ..
+ } = unit.profile; + let test = unit.mode.is_any_test(); + + cmd.arg("--crate-name").arg(&unit.target.crate_name()); + + add_path_args(&cx.bcx, unit, cmd); + + match bcx.config.shell().color_choice() { + ColorChoice::Always => { + cmd.arg("--color").arg("always"); + } + ColorChoice::Never => { + cmd.arg("--color").arg("never"); + } + ColorChoice::CargoAuto => {} + } + + if bcx.build_config.json_messages() { + cmd.arg("--error-format").arg("json"); + } + + if !test { + for crate_type in crate_types.iter() { + cmd.arg("--crate-type").arg(crate_type); + } + } + + if unit.mode.is_check() { + cmd.arg("--emit=dep-info,metadata"); + } else { + cmd.arg("--emit=dep-info,link"); + } + + let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build()) + || (crate_types.contains(&"dylib") && bcx.ws.members().any(|p| p != unit.pkg)); + if prefer_dynamic { + cmd.arg("-C").arg("prefer-dynamic"); + } + + if opt_level.as_str() != "0" { + cmd.arg("-C").arg(&format!("opt-level={}", opt_level)); + } + + // If a panic mode was configured *and* we're not ever going to be used in a + // plugin, then we can compile with that panic mode. + // + // If we're used in a plugin then we'll eventually be linked to libsyntax + // most likely which isn't compiled with a custom panic mode, so we'll just + // get an error if we actually compile with that. This fixes `panic=abort` + // crates which have plugin dependencies, but unfortunately means that + // dependencies shared between the main application and plugins must be + // compiled without `panic=abort`. This isn't so bad, though, as the main + // application will still be compiled with `panic=abort`. + if let Some(panic) = panic.as_ref() { + if !cx.used_in_plugin.contains(unit) { + cmd.arg("-C").arg(format!("panic={}", panic)); + } + } + let manifest = unit.pkg.manifest(); + + if manifest.features().is_enabled(Feature::edition()) { + cmd.arg(format!("--edition={}", manifest.edition())); + } + + // Disable LTO for host builds as prefer_dynamic and it are mutually + // exclusive. + if unit.target.can_lto() && !unit.target.for_host() { + match *lto { + Lto::Bool(false) => {} + Lto::Bool(true) => { + cmd.args(&["-C", "lto"]); + } + Lto::Named(ref s) => { + cmd.arg("-C").arg(format!("lto={}", s)); + } + } + } + + if let Some(n) = codegen_units { + // There are some restrictions with LTO and codegen-units, so we + // only add codegen units when LTO is not used. + cmd.arg("-C").arg(&format!("codegen-units={}", n)); + } + + if let Some(debuginfo) = debuginfo { + cmd.arg("-C").arg(format!("debuginfo={}", debuginfo)); + } + + if let Some(ref args) = bcx.extra_args_for(unit) { + cmd.args(args); + } + + // -C overflow-checks is implied by the setting of -C debug-assertions, + // so we only need to provide -C overflow-checks if it differs from + // the value of -C debug-assertions we would provide. 
+ if opt_level.as_str() != "0" {
+ if debug_assertions {
+ cmd.args(&["-C", "debug-assertions=on"]);
+ if !overflow_checks {
+ cmd.args(&["-C", "overflow-checks=off"]);
+ }
+ } else if overflow_checks {
+ cmd.args(&["-C", "overflow-checks=on"]);
+ }
+ } else if !debug_assertions {
+ cmd.args(&["-C", "debug-assertions=off"]);
+ if overflow_checks {
+ cmd.args(&["-C", "overflow-checks=on"]);
+ }
+ } else if !overflow_checks {
+ cmd.args(&["-C", "overflow-checks=off"]);
+ }
+
+ if test && unit.target.harness() {
+ cmd.arg("--test");
+ } else if test {
+ cmd.arg("--cfg").arg("test");
+ }
+
+ // We ideally want deterministic invocations of rustc to ensure that
+ // rustc-caching strategies like sccache are able to cache more, so sort the
+ // feature list here.
+ for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) {
+ cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
+ }
+
+ match cx.files().metadata(unit) {
+ Some(m) => {
+ cmd.arg("-C").arg(&format!("metadata={}", m));
+ cmd.arg("-C").arg(&format!("extra-filename=-{}", m));
+ }
+ None => {
+ cmd.arg("-C")
+ .arg(&format!("metadata={}", cx.files().target_short_hash(unit)));
+ }
+ }
+
+ if rpath {
+ cmd.arg("-C").arg("rpath");
+ }
+
+ cmd.arg("--out-dir").arg(&cx.files().out_dir(unit));
+
+ fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str, val: Option<&OsStr>) {
+ if let Some(val) = val {
+ let mut joined = OsString::from(prefix);
+ joined.push(val);
+ cmd.arg(key).arg(joined);
+ }
+ }
+
+ if unit.kind == Kind::Target {
+ opt(
+ cmd,
+ "--target",
+ "",
+ bcx.build_config
+ .requested_target
+ .as_ref()
+ .map(|s| s.as_ref()),
+ );
+ }
+
+ opt(cmd, "-C", "ar=", bcx.ar(unit.kind).map(|s| s.as_ref()));
+ opt(
+ cmd,
+ "-C",
+ "linker=",
+ bcx.linker(unit.kind).map(|s| s.as_ref()),
+ );
+ cmd.args(&cx.incremental_args(unit)?);
+
+ Ok(())
+}
+
+fn build_deps_args<'a, 'cfg>(
+ cmd: &mut ProcessBuilder,
+ cx: &mut Context<'a, 'cfg>,
+ unit: &Unit<'a>,
+) -> CargoResult<()> {
+ let bcx = cx.bcx;
+ cmd.arg("-L").arg(&{
+ let mut deps = OsString::from("dependency=");
+ deps.push(cx.files().deps_dir(unit));
+ deps
+ });
+
+ // Be sure that the host path is also listed. This'll ensure that proc-macro
+ // dependencies are correctly found (for reexported macros).
+ if let Kind::Target = unit.kind {
+ cmd.arg("-L").arg(&{
+ let mut deps = OsString::from("dependency=");
+ deps.push(cx.files().host_deps());
+ deps
+ });
+ }
+
+ let dep_targets = cx.dep_targets(unit);
+
+ // If there is no linkable target but there should be one, rustc fails
+ // later on if there is an `extern crate` for it. This may turn into a hard
+ // error in the future, see PR #4797
+ if !dep_targets
+ .iter()
+ .any(|u| !u.mode.is_doc() && u.target.linkable())
+ {
+ if let Some(u) = dep_targets
+ .iter()
+ .find(|u| !u.mode.is_doc() && u.target.is_lib())
+ {
+ bcx.config.shell().warn(format!(
+ "The package `{}` \
+ provides no linkable target. The compiler might raise an error while compiling \
+ `{}`. Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \
+ Cargo.toml.
This warning might turn into a hard error in the future.",
+ u.target.crate_name(),
+ unit.target.crate_name(),
+ u.target.crate_name()
+ ))?;
+ }
+ }
+
+ for dep in dep_targets {
+ if dep.mode.is_run_custom_build() {
+ cmd.env("OUT_DIR", &cx.files().build_script_out_dir(&dep));
+ }
+ if dep.target.linkable() && !dep.mode.is_doc() {
+ link_to(cmd, cx, unit, &dep)?;
+ }
+ }
+
+ return Ok(());
+
+ fn link_to<'a, 'cfg>(
+ cmd: &mut ProcessBuilder,
+ cx: &mut Context<'a, 'cfg>,
+ current: &Unit<'a>,
+ dep: &Unit<'a>,
+ ) -> CargoResult<()> {
+ let bcx = cx.bcx;
+ for output in cx.outputs(dep)?.iter() {
+ if output.flavor != FileFlavor::Linkable {
+ continue;
+ }
+ let mut v = OsString::new();
+ let name = bcx.extern_crate_name(current, dep)?;
+ v.push(name);
+ v.push("=");
+ v.push(cx.files().out_dir(dep));
+ v.push(&path::MAIN_SEPARATOR.to_string());
+ v.push(&output.path.file_name().unwrap());
+ cmd.arg("--extern").arg(&v);
+ }
+ Ok(())
+ }
+}
+
+fn envify(s: &str) -> String {
+ s.chars()
+ .flat_map(|c| c.to_uppercase())
+ .map(|c| if c == '-' { '_' } else { c })
+ .collect()
+}
+
+impl Kind {
+ fn for_target(&self, target: &Target) -> Kind {
+ // Once we start compiling for the `Host` kind we continue doing so, but
+ // if we are a `Target` kind and then we start compiling for a target
+ // that needs to be on the host we lift ourselves up to `Host`
+ match *self {
+ Kind::Host => Kind::Host,
+ Kind::Target if target.for_host() => Kind::Host,
+ Kind::Target => Kind::Target,
+ }
+ }
+}
diff --git a/src/cargo/core/compiler/output_depinfo.rs b/src/cargo/core/compiler/output_depinfo.rs
new file mode 100644
index 000000000..6142e32ee
--- /dev/null
+++ b/src/cargo/core/compiler/output_depinfo.rs
@@ -0,0 +1,125 @@
+use std::collections::{BTreeSet, HashSet};
+use std::fs::File;
+use std::io::{BufWriter, Write};
+use std::path::{Path, PathBuf};
+
+use super::{fingerprint, Context, Unit};
+use util::paths;
+use util::{internal, CargoResult};
+
+fn render_filename<P: AsRef<Path>>(path: P, basedir: Option<&str>) -> CargoResult<String> {
+ let path = path.as_ref();
+ let relpath = match basedir {
+ None => path,
+ Some(base) => match path.strip_prefix(base) {
+ Ok(relpath) => relpath,
+ _ => path,
+ },
+ };
+ relpath
+ .to_str()
+ .ok_or_else(|| internal("path not utf-8"))
+ .map(|f| f.replace(" ", "\\ "))
+}
+
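+// A minimal usage sketch (illustrative, hypothetical values): paths are
+// rendered relative to `basedir` when possible, and spaces are escaped for
+// Makefile-style dep-info consumers:
+//
+// let rendered = render_filename("/ws/src/my lib.rs", Some("/ws"))?;
+// assert_eq!(rendered, "src/my\\ lib.rs");
+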
+fn add_deps_for_unit<'a, 'b>(
+ deps: &mut BTreeSet<PathBuf>,
+ context: &mut Context<'a, 'b>,
+ unit: &Unit<'a>,
+ visited: &mut HashSet<Unit<'a>>,
+) -> CargoResult<()> {
+ if !visited.insert(*unit) {
+ return Ok(());
+ }
+
+ // units representing the execution of a build script don't actually
+ // generate a dep info file, so we just keep on going below
+ if !unit.mode.is_run_custom_build() {
+ // Add dependencies from rustc dep-info output (stored in fingerprint directory)
+ let dep_info_loc = fingerprint::dep_info_loc(context, unit);
+ if let Some(paths) = fingerprint::parse_dep_info(unit.pkg, &dep_info_loc)? {
+ for path in paths {
+ deps.insert(path);
+ }
+ } else {
+ debug!(
+ "can't find dep_info for {:?} {}",
+ unit.pkg.package_id(),
+ unit.target
+ );
+ return Err(internal("dep_info missing"));
+ }
+ }
+
+ // Add rerun-if-changed dependencies
+ let key = (unit.pkg.package_id().clone(), unit.kind);
+ if let Some(output) = context.build_state.outputs.lock().unwrap().get(&key) {
+ for path in &output.rerun_if_changed {
+ deps.insert(path.into());
+ }
+ }
+
+ // Recursively traverse all transitive dependencies
+ for dep_unit in context.dep_targets(unit).iter() {
+ let source_id = dep_unit.pkg.package_id().source_id();
+ if source_id.is_path() {
+ add_deps_for_unit(deps, context, dep_unit, visited)?;
+ }
+ }
+ Ok(())
+}
+
+pub fn output_depinfo<'a, 'b>(cx: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
+ let bcx = cx.bcx;
+ let mut deps = BTreeSet::new();
+ let mut visited = HashSet::new();
+ let success = add_deps_for_unit(&mut deps, cx, unit, &mut visited).is_ok();
+ let basedir_string;
+ let basedir = match bcx.config.get_path("build.dep-info-basedir")? {
+ Some(value) => {
+ basedir_string = value
+ .val
+ .as_os_str()
+ .to_str()
+ .ok_or_else(|| internal("build.dep-info-basedir path not utf-8"))?
+ .to_string();
+ Some(basedir_string.as_str())
+ }
+ None => None,
+ };
+ let deps = deps.iter()
+ .map(|f| render_filename(f, basedir))
+ .collect::<CargoResult<Vec<_>>>()?;
+
+ for output in cx.outputs(unit)?.iter() {
+ if let Some(ref link_dst) = output.hardlink {
+ let output_path = link_dst.with_extension("d");
+ if success {
+ let target_fn = render_filename(link_dst, basedir)?;
+
+ // If nothing changed don't recreate the file which could alter
+ // its mtime
+ if let Ok(previous) = fingerprint::parse_rustc_dep_info(&output_path) {
+ if previous.len() == 1 && previous[0].0 == target_fn && previous[0].1 == deps {
+ continue;
+ }
+ }
+
+ // Otherwise write it all out
+ let mut outfile = BufWriter::new(File::create(output_path)?);
+ write!(outfile, "{}:", target_fn)?;
+ for dep in &deps {
+ write!(outfile, " {}", dep)?;
+ }
+ writeln!(outfile, "")?;
+
+ // dep-info generation failed, so delete output file. This will
+ // usually cause the build system to always rerun the build
+ // rule, which is correct if inefficient.
+ } else if output_path.exists() {
+ paths::remove_file(output_path)?;
+ }
+ }
+ }
+ Ok(())
+}
diff --git a/src/cargo/core/dependency.rs b/src/cargo/core/dependency.rs
new file mode 100644
index 000000000..441e34a2a
--- /dev/null
+++ b/src/cargo/core/dependency.rs
@@ -0,0 +1,415 @@
+use std::fmt;
+use std::rc::Rc;
+use std::str::FromStr;
+
+use semver::VersionReq;
+use semver::ReqParseError;
+use serde::ser;
+
+use core::{PackageId, SourceId, Summary};
+use core::interning::InternedString;
+use util::{Cfg, CfgExpr, Config};
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+
+/// Information about a dependency requested by a Cargo manifest.
+/// Cheap to copy.
+#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
+pub struct Dependency {
+ inner: Rc<Inner>,
+}
+
+/// The data underlying a Dependency.
+#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
+struct Inner {
+ name: InternedString,
+ source_id: SourceId,
+ registry_id: Option<SourceId>,
+ req: VersionReq,
+ specified_req: bool,
+ kind: Kind,
+ only_match_name: bool,
+ rename: Option<String>,
+
+ optional: bool,
+ default_features: bool,
+ features: Vec<InternedString>,
+
+ // This dependency should be used only for this platform.
+ // `None` means *all platforms*.
+ platform: Option<Platform>,
+}
+
+#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)]
+pub enum Platform {
+ Name(String),
+ Cfg(CfgExpr),
+}
+
+#[derive(Serialize)]
+struct SerializedDependency<'a> {
+ name: &'a str,
+ source: &'a SourceId,
+ req: String,
+ kind: Kind,
+ rename: Option<&'a str>,
+
+ optional: bool,
+ uses_default_features: bool,
+ features: &'a [String],
+ target: Option<&'a Platform>,
+}
+
+impl ser::Serialize for Dependency {
+ fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+ where
+ S: ser::Serializer,
+ {
+ let string_features: Vec<_> = self.features().iter().map(|s| s.to_string()).collect();
+ SerializedDependency {
+ name: &*self.name(),
+ source: self.source_id(),
+ req: self.version_req().to_string(),
+ kind: self.kind(),
+ optional: self.is_optional(),
+ uses_default_features: self.uses_default_features(),
+ features: &string_features,
+ target: self.platform(),
+ rename: self.rename(),
+ }.serialize(s)
+ }
+}
+
+#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)]
+pub enum Kind {
+ Normal,
+ Development,
+ Build,
+}
+
+fn parse_req_with_deprecated(
+ req: &str,
+ extra: Option<(&PackageId, &Config)>,
+) -> CargoResult<VersionReq> {
+ match VersionReq::parse(req) {
+ Err(e) => {
+ let (inside, config) = match extra {
+ Some(pair) => pair,
+ None => return Err(e.into()),
+ };
+ match e {
+ ReqParseError::DeprecatedVersionRequirement(requirement) => {
+ let msg = format!(
+ "\
+parsed version requirement `{}` is no longer valid
+
+Previous versions of Cargo accepted this malformed requirement,
+but it is being deprecated. This was found when parsing the manifest
+of {} {}, and the correct version requirement is `{}`.
+
+This will soon become a hard error, so it's either recommended to
+update to a fixed version or contact the upstream maintainer about
+this warning.
+",
+ req,
+ inside.name(),
+ inside.version(),
+ requirement
+ );
+ config.shell().warn(&msg)?;
+
+ Ok(requirement)
+ }
+ e => Err(e.into()),
+ }
+ }
+ Ok(v) => Ok(v),
+ }
+}
+
+impl ser::Serialize for Kind {
+ fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+ where
+ S: ser::Serializer,
+ {
+ match *self {
+ Kind::Normal => None,
+ Kind::Development => Some("dev"),
+ Kind::Build => Some("build"),
+ }.serialize(s)
+ }
+}
+
+impl Dependency {
+ /// Attempt to create a `Dependency` from an entry in the manifest.
+ pub fn parse(
+ name: &str,
+ version: Option<&str>,
+ source_id: &SourceId,
+ inside: &PackageId,
+ config: &Config,
+ ) -> CargoResult<Dependency> {
+ let arg = Some((inside, config));
+ let (specified_req, version_req) = match version {
+ Some(v) => (true, parse_req_with_deprecated(v, arg)?),
+ None => (false, VersionReq::any()),
+ };
+
+ let mut ret = Dependency::new_override(name, source_id);
+ {
+ let ptr = Rc::make_mut(&mut ret.inner);
+ ptr.only_match_name = false;
+ ptr.req = version_req;
+ ptr.specified_req = specified_req;
+ }
+ Ok(ret)
+ }
+
+ /// Attempt to create a `Dependency` from an entry in the manifest.
+ pub fn parse_no_deprecated(
+ name: &str,
+ version: Option<&str>,
+ source_id: &SourceId,
+ ) -> CargoResult<Dependency> {
+ let (specified_req, version_req) = match version {
+ Some(v) => (true, parse_req_with_deprecated(v, None)?),
+ None => (false, VersionReq::any()),
+ };
+
+ let mut ret = Dependency::new_override(name, source_id);
+ {
+ let ptr = Rc::make_mut(&mut ret.inner);
+ ptr.only_match_name = false;
+ ptr.req = version_req;
+ ptr.specified_req = specified_req;
+ }
+ Ok(ret)
+ }
+
+ pub fn new_override(name: &str, source_id: &SourceId) -> Dependency {
+ assert!(!name.is_empty());
+ Dependency {
+ inner: Rc::new(Inner {
+ name: InternedString::new(name),
+ source_id: source_id.clone(),
+ registry_id: None,
+ req: VersionReq::any(),
+ kind: Kind::Normal,
+ only_match_name: true,
+ optional: false,
+ features: Vec::new(),
+ default_features: true,
+ specified_req: false,
+ platform: None,
+ rename: None,
+ }),
+ }
+ }
+
+ pub fn version_req(&self) -> &VersionReq {
+ &self.inner.req
+ }
+
+ pub fn name(&self) -> InternedString {
+ self.inner.name
+ }
+
+ pub fn source_id(&self) -> &SourceId {
+ &self.inner.source_id
+ }
+
+ pub fn registry_id(&self) -> Option<&SourceId> {
+ self.inner.registry_id.as_ref()
+ }
+
+ pub fn set_registry_id(&mut self, registry_id: &SourceId) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).registry_id = Some(registry_id.clone());
+ self
+ }
+
+ pub fn kind(&self) -> Kind {
+ self.inner.kind
+ }
+
+ pub fn specified_req(&self) -> bool {
+ self.inner.specified_req
+ }
+
+ /// If none, this dependency must be built for all platforms.
+ /// If some, it must only be built for the specified platform.
+ pub fn platform(&self) -> Option<&Platform> {
+ self.inner.platform.as_ref()
+ }
+
+ pub fn rename(&self) -> Option<&str> {
+ self.inner.rename.as_ref().map(|s| &**s)
+ }
+
+ pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).kind = kind;
+ self
+ }
+
+ /// Sets the list of features requested for the package.
+ pub fn set_features(&mut self, features: Vec<String>) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).features =
+ features.iter().map(|s| InternedString::new(s)).collect();
+ self
+ }
+
+ /// Sets whether the dependency requests default features of the package.
+ pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).default_features = default_features;
+ self
+ }
+
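+ // A minimal builder-style sketch (illustrative, not upstream code;
+ // assumes a `SourceId` value named `crates_io` is in scope):
+ //
+ // let mut dep = Dependency::parse_no_deprecated("serde", Some("1.0"), &crates_io)?;
+ // dep.set_kind(Kind::Build)
+ // .set_default_features(false)
+ // .set_features(vec!["derive".to_string()]);
+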
+ /// Sets whether the dependency is optional.
+ pub fn set_optional(&mut self, optional: bool) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).optional = optional;
+ self
+ }
+
+ /// Set the source id for this dependency
+ pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).source_id = id;
+ self
+ }
+
+ /// Set the version requirement for this dependency
+ pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).req = req;
+ self
+ }
+
+ pub fn set_platform(&mut self, platform: Option<Platform>) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).platform = platform;
+ self
+ }
+
+ pub fn set_rename(&mut self, rename: &str) -> &mut Dependency {
+ Rc::make_mut(&mut self.inner).rename = Some(rename.to_string());
+ self
+ }
+
+ /// Lock this dependency to depending on the specified package id
+ pub fn lock_to(&mut self, id: &PackageId) -> &mut Dependency {
+ assert_eq!(self.inner.source_id, *id.source_id());
+ assert!(self.inner.req.matches(id.version()));
+ trace!(
+ "locking dep from `{}` with `{}` at {} to {}",
+ self.name(),
+ self.version_req(),
+ self.source_id(),
+ id
+ );
+ self.set_version_req(VersionReq::exact(id.version()))
+ .set_source_id(id.source_id().clone())
+ }
+
+ /// Returns whether this is a "locked" dependency, basically whether it has
+ /// an exact version req.
+ pub fn is_locked(&self) -> bool {
+ // Kind of a hack to figure this out, but it works!
+ self.inner.req.to_string().starts_with('=')
+ }
+
+ /// Returns false if the dependency is only used to build the local package.
+ pub fn is_transitive(&self) -> bool {
+ match self.inner.kind {
+ Kind::Normal | Kind::Build => true,
+ Kind::Development => false,
+ }
+ }
+
+ pub fn is_build(&self) -> bool {
+ match self.inner.kind {
+ Kind::Build => true,
+ _ => false,
+ }
+ }
+
+ pub fn is_optional(&self) -> bool {
+ self.inner.optional
+ }
+
+ /// Returns true if the default features of the dependency are requested.
+ pub fn uses_default_features(&self) -> bool {
+ self.inner.default_features
+ }
+ /// Returns the list of features that are requested by the dependency.
+ pub fn features(&self) -> &[InternedString] {
+ &self.inner.features
+ }
+
+ /// Returns true if the package (`sum`) can fulfill this dependency request.
+ pub fn matches(&self, sum: &Summary) -> bool {
+ self.matches_id(sum.package_id())
+ }
+
+ /// Returns true if the package (`id`) can fulfill this dependency request,
+ /// ignoring its source.
+ pub fn matches_ignoring_source(&self, id: &PackageId) -> bool {
+ self.name() == id.name() && self.version_req().matches(id.version())
+ }
+
+ /// Returns true if the package (`id`) can fulfill this dependency request.
+ pub fn matches_id(&self, id: &PackageId) -> bool {
+ self.inner.name == id.name()
+ && (self.inner.only_match_name
+ || (self.inner.req.matches(id.version())
+ && &self.inner.source_id == id.source_id()))
+ }
+
+ pub fn map_source(mut self, to_replace: &SourceId, replace_with: &SourceId) -> Dependency {
+ if self.source_id() != to_replace {
+ self
+ } else {
+ self.set_source_id(replace_with.clone());
+ self
+ }
+ }
+}
+
+impl Platform {
+ pub fn matches(&self, name: &str, cfg: Option<&[Cfg]>) -> bool {
+ match *self {
+ Platform::Name(ref p) => p == name,
+ Platform::Cfg(ref p) => match cfg {
+ Some(cfg) => p.matches(cfg),
+ None => false,
+ },
+ }
+ }
+}
+
+impl ser::Serialize for Platform {
+ fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+ where
+ S: ser::Serializer,
+ {
+ self.to_string().serialize(s)
+ }
+}
+
+impl FromStr for Platform {
+ type Err = CargoError;
+
+ fn from_str(s: &str) -> CargoResult<Platform> {
+ if s.starts_with("cfg(") && s.ends_with(')') {
+ let s = &s[4..s.len() - 1];
+ let p = s.parse()
+ .map(Platform::Cfg)
+ .chain_err(|| format_err!("failed to parse `{}` as a cfg expression", s))?;
+ Ok(p)
+ } else {
+ Ok(Platform::Name(s.to_string()))
+ }
+ }
+}
+
+impl fmt::Display for Platform {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Platform::Name(ref n) => n.fmt(f),
+ Platform::Cfg(ref e) => write!(f, "cfg({})", e),
+ }
+ }
+}
diff --git a/src/cargo/core/features.rs b/src/cargo/core/features.rs
new file mode 100644
index 000000000..b83e242ce
--- /dev/null
+++ b/src/cargo/core/features.rs
@@ -0,0 +1,366 @@
+//! Support for nightly features in Cargo itself
+//!
+//! This file is the version of `feature_gate.rs` in upstream Rust for Cargo
+//! itself and is intended to be the avenue for which new features in Cargo are
+//! gated by default and then eventually stabilized. All known stable and
+//! unstable features are tracked in this file.
+//!
+//! If you're reading this then you're likely interested in adding a feature to
+//! Cargo, and the good news is that it shouldn't be too hard! To do this you'll
+//! want to follow these steps:
+//!
+//! 1. Add your feature. Do this by searching for "look here" in this file and
+//! expanding the macro invocation that lists all features with your new
+//! feature.
+//!
+//! 2. Find the appropriate place to place the feature gate in Cargo itself. If
+//! you're extending the manifest format you'll likely just want to modify
+//! the `Manifest::feature_gate` function, but otherwise you may wish to
+//! place the feature gate elsewhere in Cargo.
+//!
+//! 3. To actually perform the feature gate, you'll want to have code that looks
+//! like:
+//!
+//! ```rust,ignore
+//! use core::{Feature, Features};
+//!
+//! let feature = Feature::launch_into_space();
+//! package.manifest().features().require(feature).chain_err(|| {
+//! "launching Cargo into space right now is unstable and may result in \
+//! unintended damage to your codebase, use with caution"
+//! })?;
+//! ```
+//!
+//! Notably you'll notice the `require` function called with your `Feature`, and
+//! then you use `chain_err` to tack on more context for why the feature was
+//! required when the feature isn't activated.
+//!
+//! 4. Update the unstable documentation at
+//! `src/doc/src/reference/unstable.md` to include a short description of
+//! how to use your new feature. When the feature is stabilized, be sure
+//! that the Cargo Guide or Reference is updated to fully document the
+//! feature and remove the entry from the Unstable section.
+//!
+//! And hopefully that's it! Bear with us though that this is, at the time of
+//! this writing, a very new feature in Cargo. If the process differs from this
+//! we'll be sure to update this documentation!
+
+use std::env;
+use std::fmt;
+use std::str::FromStr;
+
+use failure::Error;
+
+use util::errors::CargoResult;
+
+/// The edition of the compiler (RFC 2052)
+#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, Eq, PartialEq, Serialize, Deserialize)]
+pub enum Edition {
+ /// The 2015 edition
+ Edition2015,
+ /// The 2018 edition
+ Edition2018,
+}
+
+impl fmt::Display for Edition {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match *self {
+ Edition::Edition2015 => f.write_str("2015"),
+ Edition::Edition2018 => f.write_str("2018"),
+ }
+ }
+}
+impl FromStr for Edition {
+ type Err = Error;
+ fn from_str(s: &str) -> Result<Edition, Error> {
+ match s {
+ "2015" => Ok(Edition::Edition2015),
+ "2018" => Ok(Edition::Edition2018),
+ s => {
+ bail!("supported edition values are `2015` or `2018`, but `{}` \
+ is unknown", s)
+ }
+ }
+ }
+}
+
+enum Status {
+ Stable,
+ Unstable,
+}
+
+macro_rules! features {
+ (
+ pub struct Features {
+ $([$stab:ident] $feature:ident: bool,)*
+ }
+ ) => (
+ #[derive(Default, Clone, Debug)]
+ pub struct Features {
+ $($feature: bool,)*
+ activated: Vec<String>,
+ }
+
+ impl Feature {
+ $(
+ pub fn $feature() -> &'static Feature {
+ fn get(features: &Features) -> bool {
+ features.$feature
+ }
+ static FEAT: Feature = Feature {
+ name: stringify!($feature),
+ get,
+ };
+ &FEAT
+ }
+ )*
+
+ fn is_enabled(&self, features: &Features) -> bool {
+ (self.get)(features)
+ }
+ }
+
+ impl Features {
+ fn status(&mut self, feature: &str) -> Option<(&mut bool, Status)> {
+ if feature.contains("_") {
+ return None
+ }
+ let feature = feature.replace("-", "_");
+ $(
+ if feature == stringify!($feature) {
+ return Some((&mut self.$feature, stab!($stab)))
+ }
+ )*
+ None
+ }
+ }
+ )
+}
+
+macro_rules! stab {
+ (stable) => {
+ Status::Stable
+ };
+ (unstable) => {
+ Status::Unstable
+ };
+}
+
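+// Illustrative sketch of the API the macro below generates, for a
+// hypothetical feature `foo_bar` (hyphens in the manifest map to
+// underscores here; not upstream code):
+//
+// let mut warnings = Vec::new();
+// let features = Features::new(&["foo-bar".to_string()], &mut warnings)?;
+// assert!(features.is_enabled(Feature::foo_bar()));
+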
+/// A listing of all features in Cargo
+///
+/// "look here"
+///
+/// This is the macro that lists all stable and unstable features in Cargo.
+/// You'll want to add to this macro whenever you add a feature to Cargo, also
+/// following the directions above.
+///
+/// Note that all feature names here are valid Rust identifiers, but the `_`
+/// character is translated to `-` when specified in the `cargo-features`
+/// manifest entry in `Cargo.toml`.
+features! {
+ pub struct Features {
+
+ // A dummy feature that doesn't actually gate anything, but it's used in
+ // testing to ensure that we can enable stable features.
+ [stable] test_dummy_stable: bool,
+
+ // A dummy feature that gates the usage of the `im-a-teapot` manifest
+ // entry. This is basically just intended for tests.
+ [unstable] test_dummy_unstable: bool,
+
+ // Downloading packages from alternative registry indexes.
+ [unstable] alternative_registries: bool,
+
+ // Using editions
+ [unstable] edition: bool,
+
+ // Renaming a package in the manifest via the `package` key
+ [unstable] rename_dependency: bool,
+
+ // Whether a lock file is published with this crate
+ [unstable] publish_lockfile: bool,
+
+ // Overriding profiles for dependencies.
+ [unstable] profile_overrides: bool,
+
+ // Separating the namespaces for features and dependencies
+ [unstable] namespaced_features: bool,
+ }
+}
+
+pub struct Feature {
+ name: &'static str,
+ get: fn(&Features) -> bool,
+}
+
+impl Features {
+ pub fn new(features: &[String], warnings: &mut Vec<String>) -> CargoResult<Features> {
+ let mut ret = Features::default();
+ for feature in features {
+ ret.add(feature, warnings)?;
+ ret.activated.push(feature.to_string());
+ }
+ Ok(ret)
+ }
+
+ fn add(&mut self, feature: &str, warnings: &mut Vec<String>) -> CargoResult<()> {
+ let (slot, status) = match self.status(feature) {
+ Some(p) => p,
+ None => bail!("unknown cargo feature `{}`", feature),
+ };
+
+ if *slot {
+ bail!("the cargo feature `{}` has already been activated", feature);
+ }
+
+ match status {
+ Status::Stable => {
+ let warning = format!(
+ "the cargo feature `{}` is now stable \
+ and is no longer necessary to be listed \
+ in the manifest",
+ feature
+ );
+ warnings.push(warning);
+ }
+ Status::Unstable if !nightly_features_allowed() => bail!(
+ "the cargo feature `{}` requires a nightly version of \
+ Cargo, but this is the `{}` channel",
+ feature,
+ channel()
+ ),
+ Status::Unstable => {}
+ }
+
+ *slot = true;
+
+ Ok(())
+ }
+
+ pub fn activated(&self) -> &[String] {
+ &self.activated
+ }
+
+ pub fn require(&self, feature: &Feature) -> CargoResult<()> {
+ if feature.is_enabled(self) {
+ Ok(())
+ } else {
+ let feature = feature.name.replace("_", "-");
+ let mut msg = format!("feature `{}` is required", feature);
+
+ if nightly_features_allowed() {
+ let s = format!(
+ "\n\nconsider adding `cargo-features = [\"{0}\"]` \
+ to the manifest",
+ feature
+ );
+ msg.push_str(&s);
+ } else {
+ let s = format!(
+ "\n\n\
+ this Cargo does not support nightly features, but if you\n\
+ switch to nightly channel you can add\n\
+ `cargo-features = [\"{}\"]` to enable this feature",
+ feature
+ );
+ msg.push_str(&s);
+ }
+ bail!("{}", msg);
+ }
+ }
+
+ pub fn is_enabled(&self, feature: &Feature) -> bool {
+ feature.is_enabled(self)
+ }
+}
+
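+// Gating in practice (sketch, mirroring the module docs above; `manifest`
+// is a hypothetical `Manifest` value): callers require a feature and attach
+// context to the error when it is not active:
+//
+// manifest.features().require(Feature::edition()).chain_err(|| {
+// "editions require `cargo-features = [\"edition\"]` in the manifest"
+// })?;
+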
+/// A parsed representation of all unstable flags that Cargo accepts.
+///
+/// Cargo, like `rustc`, accepts a suite of `-Z` flags which are intended for
+/// gating unstable functionality to Cargo. These flags are only available on
+/// the nightly channel of Cargo.
+///
+/// This struct doesn't have quite the same convenience macro that the features
+/// have above, but the procedure should still be relatively stable for adding a
+/// new unstable flag:
+///
+/// 1. First, add a field to this `CliUnstable` structure. All flags are allowed
+/// to have a value as the `-Z` flags are either of the form `-Z foo` or
+/// `-Z foo=bar`, and it's up to you how to parse `bar`.
+///
+/// 2. Add an arm to the match statement in `CliUnstable::add` below to match on
+/// your new flag. The key (`k`) is what you're matching on and the value is
+/// in `v`.
+///
+/// 3. (optional) Add a new parsing function to parse your datatype. As of now
+/// there's an example for `bool`, but more can be added!
+///
+/// 4. In Cargo use `config.cli_unstable()` to get a reference to this structure
+/// and then test for your flag or your value and act accordingly.
+///
+/// If you have any trouble with this, please let us know!
+#[derive(Default, Debug)]
+pub struct CliUnstable {
+ pub print_im_a_teapot: bool,
+ pub unstable_options: bool,
+ pub offline: bool,
+ pub no_index_update: bool,
+ pub avoid_dev_deps: bool,
+ pub minimal_versions: bool,
+ pub package_features: bool,
+}
+
+impl CliUnstable {
+ pub fn parse(&mut self, flags: &[String]) -> CargoResult<()> {
+ if !flags.is_empty() && !nightly_features_allowed() {
+ bail!("the `-Z` flag is only accepted on the nightly channel of Cargo")
+ }
+ for flag in flags {
+ self.add(flag)?;
+ }
+ Ok(())
+ }
+
+ fn add(&mut self, flag: &str) -> CargoResult<()> {
+ let mut parts = flag.splitn(2, '=');
+ let k = parts.next().unwrap();
+ let v = parts.next();
+
+ fn parse_bool(value: Option<&str>) -> CargoResult<bool> {
+ match value {
+ None | Some("yes") => Ok(true),
+ Some("no") => Ok(false),
+ Some(s) => bail!("expected `no` or `yes`, found: {}", s),
+ }
+ }
+
+ match k {
+ "print-im-a-teapot" => self.print_im_a_teapot = parse_bool(v)?,
+ "unstable-options" => self.unstable_options = true,
+ "offline" => self.offline = true,
+ "no-index-update" => self.no_index_update = true,
+ "avoid-dev-deps" => self.avoid_dev_deps = true,
+ "minimal-versions" => self.minimal_versions = true,
+ "package-features" => self.package_features = true,
+ _ => bail!("unknown `-Z` flag specified: {}", k),
+ }
+
+ Ok(())
+ }
+}
+
+fn channel() -> String {
+ env::var("__CARGO_TEST_CHANNEL_OVERRIDE_DO_NOT_USE_THIS").unwrap_or_else(|_| {
+ ::version()
+ .cfg_info
+ .map(|c| c.release_channel)
+ .unwrap_or_else(|| String::from("dev"))
+ })
+}
+
+fn nightly_features_allowed() -> bool {
+ match &channel()[..] {
+ "nightly" | "dev" => true,
+ _ => false,
+ }
+}
diff --git a/src/cargo/core/interning.rs b/src/cargo/core/interning.rs
new file mode 100644
index 000000000..d8c18df2d
--- /dev/null
+++ b/src/cargo/core/interning.rs
@@ -0,0 +1,108 @@
+use serde::{Serialize, Serializer};
+
+use std::fmt;
+use std::sync::RwLock;
+use std::collections::HashSet;
+use std::slice;
+use std::str;
+use std::mem;
+use std::ptr;
+use std::cmp::Ordering;
+use std::ops::Deref;
+use std::hash::{Hash, Hasher};
+
+pub fn leak(s: String) -> &'static str {
+ let boxed = s.into_boxed_str();
+ let ptr = boxed.as_ptr();
+ let len = boxed.len();
+ mem::forget(boxed);
+ unsafe {
+ let slice = slice::from_raw_parts(ptr, len);
+ str::from_utf8_unchecked(slice)
+ }
+}
+
+lazy_static! {
+ static ref STRING_CACHE: RwLock<HashSet<&'static str>> =
+ RwLock::new(HashSet::new());
+}
+
+#[derive(Clone, Copy)]
+pub struct InternedString {
+ inner: &'static str,
+}
+
+impl PartialEq for InternedString {
+ fn eq(&self, other: &InternedString) -> bool {
+ ptr::eq(self.as_str(), other.as_str())
+ }
+}
+
+impl Eq for InternedString {}
+
+impl InternedString {
+ pub fn new(str: &str) -> InternedString {
+ let mut cache = STRING_CACHE.write().unwrap();
+ let s = cache.get(str).map(|&s| s).unwrap_or_else(|| {
+ let s = leak(str.to_string());
+ cache.insert(s);
+ s
+ });
+
+ InternedString { inner: s }
+ }
+
+ pub fn as_str(&self) -> &'static str {
+ self.inner
+ }
+}
+
+impl Deref for InternedString {
+ type Target = str;
+
+ fn deref(&self) -> &'static str {
+ self.as_str()
+ }
+}
+
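+// Sketch of the invariant the `PartialEq` impl above relies on (illustrative,
+// not upstream code): interning the same string twice hands back the same
+// &'static str, so equality can be a pointer comparison:
+//
+// let a = InternedString::new("serde");
+// let b = InternedString::new("serde");
+// assert!(ptr::eq(a.as_str(), b.as_str()));
+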
+impl Hash for InternedString {
+ // NB: we can't implement this as `identity(self).hash(state)`,
+ // because we use this for on-disk fingerprints and so need
+ // stability across Cargo invocations.
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.as_str().hash(state);
+ }
+}
+
+impl fmt::Debug for InternedString {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(self.as_str(), f)
+ }
+}
+
+impl fmt::Display for InternedString {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(self.as_str(), f)
+ }
+}
+
+impl Ord for InternedString {
+ fn cmp(&self, other: &InternedString) -> Ordering {
+ self.as_str().cmp(other.as_str())
+ }
+}
+
+impl PartialOrd for InternedString {
+ fn partial_cmp(&self, other: &InternedString) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Serialize for InternedString {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ serializer.serialize_str(self.inner)
+ }
+}
diff --git a/src/cargo/core/manifest.rs b/src/cargo/core/manifest.rs
new file mode 100644
index 000000000..5e4210386
--- /dev/null
+++ b/src/cargo/core/manifest.rs
@@ -0,0 +1,677 @@
+use std::collections::{BTreeMap, HashMap};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::path::{Path, PathBuf};
+use std::rc::Rc;
+
+use semver::Version;
+use serde::ser;
+use toml;
+use url::Url;
+
+use core::interning::InternedString;
+use core::profiles::Profiles;
+use core::{Dependency, PackageId, PackageIdSpec, SourceId, Summary};
+use core::{Edition, Feature, Features, WorkspaceConfig};
+use util::errors::*;
+use util::toml::TomlManifest;
+use util::Config;
+
+pub enum EitherManifest {
+ Real(Manifest),
+ Virtual(VirtualManifest),
+}
+
+/// Contains all the information about a package, as loaded from a Cargo.toml.
+#[derive(Clone, Debug)]
+pub struct Manifest {
+ summary: Summary,
+ targets: Vec<Target>,
+ links: Option<String>,
+ warnings: Vec<DelayedWarning>,
+ exclude: Vec<String>,
+ include: Vec<String>,
+ metadata: ManifestMetadata,
+ custom_metadata: Option<toml::Value>,
+ profiles: Profiles,
+ publish: Option<Vec<String>>,
+ publish_lockfile: bool,
+ replace: Vec<(PackageIdSpec, Dependency)>,
+ patch: HashMap<Url, Vec<Dependency>>,
+ workspace: WorkspaceConfig,
+ original: Rc<TomlManifest>,
+ features: Features,
+ edition: Edition,
+ im_a_teapot: Option<bool>,
+}
+
+/// When parsing `Cargo.toml`, some warnings should be silenced
+/// if the manifest comes from a dependency. `DelayedWarning`
+/// allows this delayed emission of warnings.
+#[derive(Clone, Debug)]
+pub struct DelayedWarning {
+ pub message: String,
+ pub is_critical: bool,
+}
+
+#[derive(Clone, Debug)]
+pub struct VirtualManifest {
+ replace: Vec<(PackageIdSpec, Dependency)>,
+ patch: HashMap<Url, Vec<Dependency>>,
+ workspace: WorkspaceConfig,
+ profiles: Profiles,
+}
+
+/// General metadata about a package which is just blindly uploaded to the
+/// registry.
+///
+/// Note that many of these fields can contain invalid values such as the
+/// homepage, repository, documentation, or license. These fields are not
+/// validated by cargo itself, but rather it is up to the registry when uploaded
+/// to validate these fields. Cargo will itself accept any valid TOML
+/// specification for these values.
+#[derive(PartialEq, Clone, Debug)]
+pub struct ManifestMetadata {
+ pub authors: Vec<String>,
+ pub keywords: Vec<String>,
+ pub categories: Vec<String>,
+ pub license: Option<String>,
+ pub license_file: Option<String>,
+ pub description: Option<String>, // not markdown
+ pub readme: Option<String>, // file, not contents
+ pub homepage: Option<String>, // url
+ pub repository: Option<String>, // url
+ pub documentation: Option<String>, // url
+ pub badges: BTreeMap<String, BTreeMap<String, String>>,
+ pub links: Option<String>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub enum LibKind {
+ Lib,
+ Rlib,
+ Dylib,
+ ProcMacro,
+ Other(String),
+}
+
+impl LibKind {
+ pub fn from_str(string: &str) -> LibKind {
+ match string {
+ "lib" => LibKind::Lib,
+ "rlib" => LibKind::Rlib,
+ "dylib" => LibKind::Dylib,
+ "proc-macro" => LibKind::ProcMacro,
+ s => LibKind::Other(s.to_string()),
+ }
+ }
+
+ /// Returns the argument suitable for `--crate-type` to pass to rustc.
+ pub fn crate_type(&self) -> &str {
+ match *self {
+ LibKind::Lib => "lib",
+ LibKind::Rlib => "rlib",
+ LibKind::Dylib => "dylib",
+ LibKind::ProcMacro => "proc-macro",
+ LibKind::Other(ref s) => s,
+ }
+ }
+
+ pub fn linkable(&self) -> bool {
+ match *self {
+ LibKind::Lib | LibKind::Rlib | LibKind::Dylib | LibKind::ProcMacro => true,
+ LibKind::Other(..) => false,
+ }
+ }
+}
+
+#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub enum TargetKind {
+ Lib(Vec<LibKind>),
+ Bin,
+ Test,
+ Bench,
+ ExampleLib(Vec<LibKind>),
+ ExampleBin,
+ CustomBuild,
+}
+
+impl ser::Serialize for TargetKind {
+ fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+ where
+ S: ser::Serializer,
+ {
+ use self::TargetKind::*;
+ match *self {
+ Lib(ref kinds) => kinds.iter().map(LibKind::crate_type).collect(),
+ Bin => vec!["bin"],
+ ExampleBin | ExampleLib(_) => vec!["example"],
+ Test => vec!["test"],
+ CustomBuild => vec!["custom-build"],
+ Bench => vec!["bench"],
+ }.serialize(s)
+ }
+}
+
+/// Information about a binary, a library, an example, etc. that is part of the
+/// package.
+#[derive(Clone, Hash, PartialEq, Eq, Debug)]
+pub struct Target {
+ kind: TargetKind,
+ name: String,
+ // Note that the `src_path` here is excluded from the `Hash` implementation
+ // as it's absolute currently and is otherwise a little too brittle for
+ // causing rebuilds. Instead the hash for the path that we send to the
+ // compiler is handled elsewhere.
+ src_path: NonHashedPathBuf,
+ required_features: Option<Vec<String>>,
+ tested: bool,
+ benched: bool,
+ doc: bool,
+ doctest: bool,
+ harness: bool, // whether to use the test harness (--test)
+ for_host: bool,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+struct NonHashedPathBuf {
+ path: PathBuf,
+}
+
+impl Hash for NonHashedPathBuf {
+ fn hash<H: Hasher>(&self, _: &mut H) {
+ // ...
+ }
+}
+
+#[derive(Serialize)]
+struct SerializedTarget<'a> {
+ /// Is this a `--bin bin`, `--lib`, `--example ex`?
+ /// Serialized as a list of strings for historical reasons.
+ kind: &'a TargetKind,
+ /// Corresponds to `--crate-type` compiler attribute.
+ /// See https://doc.rust-lang.org/reference/linkage.html
+ crate_types: Vec<&'a str>,
+ name: &'a str,
+ src_path: &'a PathBuf,
+}
+
+impl ser::Serialize for Target {
+ fn serialize<S: ser::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+ SerializedTarget {
+ kind: &self.kind,
+ crate_types: self.rustc_crate_types(),
+ name: &self.name,
+ src_path: &self.src_path.path,
+ }.serialize(s)
+ }
+}
+
+impl Manifest {
+ pub fn new(
+ summary: Summary,
+ targets: Vec<Target>,
+ exclude: Vec<String>,
+ include: Vec<String>,
+ links: Option<String>,
+ metadata: ManifestMetadata,
+ custom_metadata: Option<toml::Value>,
+ profiles: Profiles,
+ publish: Option<Vec<String>>,
+ publish_lockfile: bool,
+ replace: Vec<(PackageIdSpec, Dependency)>,
+ patch: HashMap<Url, Vec<Dependency>>,
+ workspace: WorkspaceConfig,
+ features: Features,
+ edition: Edition,
+ im_a_teapot: Option<bool>,
+ original: Rc<TomlManifest>,
+ ) -> Manifest {
+ Manifest {
+ summary,
+ targets,
+ warnings: Vec::new(),
+ exclude,
+ include,
+ links,
+ metadata,
+ custom_metadata,
+ profiles,
+ publish,
+ replace,
+ patch,
+ workspace,
+ features,
+ edition,
+ original,
+ im_a_teapot,
+ publish_lockfile,
+ }
+ }
+
+ pub fn dependencies(&self) -> &[Dependency] {
+ self.summary.dependencies()
+ }
+ pub fn exclude(&self) -> &[String] {
+ &self.exclude
+ }
+ pub fn include(&self) -> &[String] {
+ &self.include
+ }
+ pub fn metadata(&self) -> &ManifestMetadata {
+ &self.metadata
+ }
+ pub fn name(&self) -> InternedString {
+ self.package_id().name()
+ }
+ pub fn package_id(&self) -> &PackageId {
+ self.summary.package_id()
+ }
+ pub fn summary(&self) -> &Summary {
+ &self.summary
+ }
+ pub fn targets(&self) -> &[Target] {
+ &self.targets
+ }
+ pub fn version(&self) -> &Version {
+ self.package_id().version()
+ }
+ pub fn warnings(&self) -> &[DelayedWarning] {
+ &self.warnings
+ }
+ pub fn profiles(&self) -> &Profiles {
+ &self.profiles
+ }
+ pub fn publish(&self) -> &Option<Vec<String>> {
+ &self.publish
+ }
+ pub fn publish_lockfile(&self) -> bool {
+ self.publish_lockfile
+ }
+ pub fn replace(&self) -> &[(PackageIdSpec, Dependency)] {
+ &self.replace
+ }
+ pub fn original(&self) -> &TomlManifest {
+ &self.original
+ }
+ pub fn patch(&self) -> &HashMap<Url, Vec<Dependency>> {
+ &self.patch
+ }
+ pub fn links(&self) -> Option<&str> {
+ self.links.as_ref().map(|s| &s[..])
+ }
+
+ pub fn workspace_config(&self) -> &WorkspaceConfig {
+ &self.workspace
+ }
+
+ pub fn features(&self) -> &Features {
+ &self.features
+ }
+
+ pub fn add_warning(&mut self, s: String) {
+ self.warnings.push(DelayedWarning {
+ message: s,
+ is_critical: false,
+ })
+ }
+
+ pub fn add_critical_warning(&mut self, s: String) {
+ self.warnings.push(DelayedWarning {
+ message: s,
+ is_critical: true,
+ })
+ }
+
+ pub fn set_summary(&mut self, summary: Summary) {
+ self.summary = summary;
+ }
+
+ pub fn map_source(self, to_replace: &SourceId, replace_with: &SourceId) -> Manifest {
+ Manifest {
+ summary: self.summary.map_source(to_replace, replace_with),
+ ..self
+ }
+ }
+
+ pub fn feature_gate(&self) -> CargoResult<()> {
+ if self.im_a_teapot.is_some() {
+ self.features
+ .require(Feature::test_dummy_unstable())
+ .chain_err(|| {
+ format_err!(
+ "the `im-a-teapot` manifest key is unstable and may \
+ not work properly in England"
+ )
+ })?;
+ }
+
+ Ok(())
+ }
+
+ // Just a helper function to test out `-Z` flags on Cargo
+ pub fn print_teapot(&self, config: &Config) {
+ if let Some(teapot) = self.im_a_teapot {
+ if config.cli_unstable().print_im_a_teapot {
+ println!("im-a-teapot = {}", teapot);
+ }
+ }
+ }
+
+ pub fn edition(&self) -> Edition {
+ self.edition
+ }
+
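+ // Typical call sequence after a manifest is loaded (sketch, not upstream
+ // code; `manifest` and `config` are hypothetical values): run the gate
+ // checks before the manifest is used any further:
+ //
+ // manifest.feature_gate()?;
+ // manifest.print_teapot(config);
+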
+ pub fn custom_metadata(&self) -> Option<&toml::Value> {
+ self.custom_metadata.as_ref()
+ }
+}
+
+impl VirtualManifest {
+ pub fn new(
+ replace: Vec<(PackageIdSpec, Dependency)>,
+ patch: HashMap<Url, Vec<Dependency>>,
+ workspace: WorkspaceConfig,
+ profiles: Profiles,
+ ) -> VirtualManifest {
+ VirtualManifest {
+ replace,
+ patch,
+ workspace,
+ profiles,
+ }
+ }
+
+ pub fn replace(&self) -> &[(PackageIdSpec, Dependency)] {
+ &self.replace
+ }
+
+ pub fn patch(&self) -> &HashMap<Url, Vec<Dependency>> {
+ &self.patch
+ }
+
+ pub fn workspace_config(&self) -> &WorkspaceConfig {
+ &self.workspace
+ }
+
+ pub fn profiles(&self) -> &Profiles {
+ &self.profiles
+ }
+}
+
+impl Target {
+ fn with_path(src_path: PathBuf) -> Target {
+ assert!(
+ src_path.is_absolute(),
+ "`{}` is not absolute",
+ src_path.display()
+ );
+ Target {
+ kind: TargetKind::Bin,
+ name: String::new(),
+ src_path: NonHashedPathBuf { path: src_path },
+ required_features: None,
+ doc: false,
+ doctest: false,
+ harness: true,
+ for_host: false,
+ tested: true,
+ benched: true,
+ }
+ }
+
+ pub fn lib_target(name: &str, crate_targets: Vec<LibKind>, src_path: PathBuf) -> Target {
+ Target {
+ kind: TargetKind::Lib(crate_targets),
+ name: name.to_string(),
+ doctest: true,
+ doc: true,
+ ..Target::with_path(src_path)
+ }
+ }
+
+ pub fn bin_target(
+ name: &str,
+ src_path: PathBuf,
+ required_features: Option<Vec<String>>,
+ ) -> Target {
+ Target {
+ kind: TargetKind::Bin,
+ name: name.to_string(),
+ required_features,
+ doc: true,
+ ..Target::with_path(src_path)
+ }
+ }
+
+ /// Builds a `Target` corresponding to the `build = "build.rs"` entry.
+ pub fn custom_build_target(name: &str, src_path: PathBuf) -> Target {
+ Target {
+ kind: TargetKind::CustomBuild,
+ name: name.to_string(),
+ for_host: true,
+ benched: false,
+ tested: false,
+ ..Target::with_path(src_path)
+ }
+ }
+
+ pub fn example_target(
+ name: &str,
+ crate_targets: Vec<LibKind>,
+ src_path: PathBuf,
+ required_features: Option<Vec<String>>,
+ ) -> Target {
+ let kind = if crate_targets.is_empty() {
+ TargetKind::ExampleBin
+ } else {
+ TargetKind::ExampleLib(crate_targets)
+ };
+
+ Target {
+ kind,
+ name: name.to_string(),
+ required_features,
+ tested: false,
+ benched: false,
+ ..Target::with_path(src_path)
+ }
+ }
+
+ pub fn test_target(
+ name: &str,
+ src_path: PathBuf,
+ required_features: Option<Vec<String>>,
+ ) -> Target {
+ Target {
+ kind: TargetKind::Test,
+ name: name.to_string(),
+ required_features,
+ benched: false,
+ ..Target::with_path(src_path)
+ }
+ }
+
+ pub fn bench_target(
+ name: &str,
+ src_path: PathBuf,
+ required_features: Option<Vec<String>>,
+ ) -> Target {
+ Target {
+ kind: TargetKind::Bench,
+ name: name.to_string(),
+ required_features,
+ tested: false,
+ ..Target::with_path(src_path)
+ }
+ }
+
+ pub fn name(&self) -> &str {
+ &self.name
+ }
+ pub fn crate_name(&self) -> String {
+ self.name.replace("-", "_")
+ }
+ pub fn src_path(&self) -> &Path {
+ &self.src_path.path
+ }
+ pub fn required_features(&self) -> Option<&Vec<String>> {
+ self.required_features.as_ref()
+ }
+ pub fn kind(&self) -> &TargetKind {
+ &self.kind
+ }
+ pub fn tested(&self) -> bool {
+ self.tested
+ }
+ pub fn harness(&self) -> bool {
+ self.harness
+ }
+ pub fn documented(&self) -> bool {
+ self.doc
+ }
+ pub fn for_host(&self) -> bool {
+ self.for_host
+ }
+ pub fn benched(&self) -> bool {
+ self.benched
+ }
+
+ pub fn doctested(&self) -> bool {
+ self.doctest && match self.kind {
+ TargetKind::Lib(ref kinds) => kinds
+ .iter()
+ .any(|k| *k == LibKind::Rlib || *k == LibKind::Lib || *k == LibKind::ProcMacro),
+ _ => false,
+ }
+ }
+
+ pub fn allows_underscores(&self) -> bool {
+ self.is_bin() || self.is_example() ||
self.is_custom_build() + } + + pub fn is_lib(&self) -> bool { + match self.kind { + TargetKind::Lib(_) => true, + _ => false, + } + } + + pub fn is_dylib(&self) -> bool { + match self.kind { + TargetKind::Lib(ref libs) => libs.iter().any(|l| *l == LibKind::Dylib), + _ => false, + } + } + + pub fn is_cdylib(&self) -> bool { + let libs = match self.kind { + TargetKind::Lib(ref libs) => libs, + _ => return false, + }; + libs.iter().any(|l| match *l { + LibKind::Other(ref s) => s == "cdylib", + _ => false, + }) + } + + pub fn linkable(&self) -> bool { + match self.kind { + TargetKind::Lib(ref kinds) => kinds.iter().any(|k| k.linkable()), + _ => false, + } + } + + pub fn is_bin(&self) -> bool { + self.kind == TargetKind::Bin + } + + pub fn is_example(&self) -> bool { + match self.kind { + TargetKind::ExampleBin | TargetKind::ExampleLib(..) => true, + _ => false, + } + } + + pub fn is_bin_example(&self) -> bool { + // Needed for --all-examples in contexts where only runnable examples make sense + match self.kind { + TargetKind::ExampleBin => true, + _ => false, + } + } + + pub fn is_test(&self) -> bool { + self.kind == TargetKind::Test + } + pub fn is_bench(&self) -> bool { + self.kind == TargetKind::Bench + } + pub fn is_custom_build(&self) -> bool { + self.kind == TargetKind::CustomBuild + } + + /// Returns the arguments suitable for `--crate-type` to pass to rustc. + pub fn rustc_crate_types(&self) -> Vec<&str> { + match self.kind { + TargetKind::Lib(ref kinds) | TargetKind::ExampleLib(ref kinds) => { + kinds.iter().map(LibKind::crate_type).collect() + } + TargetKind::CustomBuild + | TargetKind::Bench + | TargetKind::Test + | TargetKind::ExampleBin + | TargetKind::Bin => vec!["bin"], + } + } + + pub fn can_lto(&self) -> bool { + match self.kind { + TargetKind::Lib(ref v) => { + !v.contains(&LibKind::Rlib) && !v.contains(&LibKind::Dylib) + && !v.contains(&LibKind::Lib) + } + _ => true, + } + } + + pub fn set_tested(&mut self, tested: bool) -> &mut Target { + self.tested = tested; + self + } + pub fn set_benched(&mut self, benched: bool) -> &mut Target { + self.benched = benched; + self + } + pub fn set_doctest(&mut self, doctest: bool) -> &mut Target { + self.doctest = doctest; + self + } + pub fn set_for_host(&mut self, for_host: bool) -> &mut Target { + self.for_host = for_host; + self + } + pub fn set_harness(&mut self, harness: bool) -> &mut Target { + self.harness = harness; + self + } + pub fn set_doc(&mut self, doc: bool) -> &mut Target { + self.doc = doc; + self + } +} + +impl fmt::Display for Target { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.kind { + TargetKind::Lib(..) => write!(f, "Target(lib)"), + TargetKind::Bin => write!(f, "Target(bin: {})", self.name), + TargetKind::Test => write!(f, "Target(test: {})", self.name), + TargetKind::Bench => write!(f, "Target(bench: {})", self.name), + TargetKind::ExampleBin | TargetKind::ExampleLib(..) 
=> { + write!(f, "Target(example: {})", self.name) + } + TargetKind::CustomBuild => write!(f, "Target(script)"), + } + } +} diff --git a/src/cargo/core/mod.rs b/src/cargo/core/mod.rs new file mode 100644 index 000000000..67578c822 --- /dev/null +++ b/src/cargo/core/mod.rs @@ -0,0 +1,29 @@ +pub use self::dependency::Dependency; +pub use self::features::{CliUnstable, Edition, Feature, Features}; +pub use self::manifest::{EitherManifest, VirtualManifest}; +pub use self::manifest::{LibKind, Manifest, Target, TargetKind}; +pub use self::package::{Package, PackageSet}; +pub use self::package_id::PackageId; +pub use self::package_id_spec::PackageIdSpec; +pub use self::registry::Registry; +pub use self::resolver::Resolve; +pub use self::shell::{Shell, Verbosity}; +pub use self::source::{GitReference, Source, SourceId, SourceMap}; +pub use self::summary::{FeatureMap, FeatureValue, Summary}; +pub use self::workspace::{Members, Workspace, WorkspaceConfig, WorkspaceRootConfig}; + +pub mod compiler; +pub mod dependency; +mod features; +mod interning; +pub mod manifest; +pub mod package; +pub mod package_id; +mod package_id_spec; +pub mod profiles; +pub mod registry; +pub mod resolver; +pub mod shell; +pub mod source; +pub mod summary; +mod workspace; diff --git a/src/cargo/core/package.rs b/src/cargo/core/package.rs new file mode 100644 index 000000000..37ad259f1 --- /dev/null +++ b/src/cargo/core/package.rs @@ -0,0 +1,258 @@ +use std::cell::{Ref, RefCell}; +use std::collections::HashMap; +use std::fmt; +use std::hash; +use std::path::{Path, PathBuf}; + +use semver::Version; +use serde::ser; +use toml; +use lazycell::LazyCell; + +use core::{Dependency, Manifest, PackageId, SourceId, Target}; +use core::{FeatureMap, SourceMap, Summary}; +use core::interning::InternedString; +use util::{internal, lev_distance, Config}; +use util::errors::{CargoResult, CargoResultExt}; + +/// Information about a package that is available somewhere in the file system. +/// +/// A package is a `Cargo.toml` file plus all the files that are part of it. +// TODO: Is manifest_path a relic? +#[derive(Clone, Debug)] +pub struct Package { + /// The package's manifest + manifest: Manifest, + /// The root of the package + manifest_path: PathBuf, +} + +/// A Package in a form where `Serialize` can be derived. 
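An aside on the serde idiom used next: `SerializedPackage` is a borrowed mirror of `Package` that can derive `Serialize`, and the hand-written impl for `Package` simply delegates to it. A minimal sketch of the same idiom with a made-up type (all names in this snippet are hypothetical, not part of the patch):

    #[macro_use]
    extern crate serde_derive;
    extern crate serde;
    extern crate serde_json;

    use serde::ser::{Serialize, Serializer};

    struct Sensor {
        name: String,
        reading: f64,
    }

    // Borrowed mirror of `Sensor` for which `Serialize` *can* be derived.
    #[derive(Serialize)]
    struct SerializedSensor<'a> {
        name: &'a str,
        reading: f64,
    }

    impl Serialize for Sensor {
        fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
            SerializedSensor {
                name: &self.name,
                reading: self.reading,
            }.serialize(s)
        }
    }

    fn main() {
        let s = Sensor { name: "t0".to_string(), reading: 21.5 };
        println!("{}", serde_json::to_string(&s).unwrap());
    }

The mirror struct holds only borrows, so no fields are cloned just to serialize them.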
+#[derive(Serialize)] +struct SerializedPackage<'a> { + name: &'a str, + version: &'a str, + id: &'a PackageId, + license: Option<&'a str>, + license_file: Option<&'a str>, + description: Option<&'a str>, + source: &'a SourceId, + dependencies: &'a [Dependency], + targets: &'a [Target], + features: &'a FeatureMap, + manifest_path: &'a str, + metadata: Option<&'a toml::Value>, + authors: &'a [String], + categories: &'a [String], + keywords: &'a [String], + readme: Option<&'a str>, + repository: Option<&'a str>, +} + +impl ser::Serialize for Package { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + let summary = self.manifest.summary(); + let package_id = summary.package_id(); + let manmeta = self.manifest.metadata(); + let license = manmeta.license.as_ref().map(String::as_ref); + let license_file = manmeta.license_file.as_ref().map(String::as_ref); + let description = manmeta.description.as_ref().map(String::as_ref); + let authors = manmeta.authors.as_ref(); + let categories = manmeta.categories.as_ref(); + let keywords = manmeta.keywords.as_ref(); + let readme = manmeta.readme.as_ref().map(String::as_ref); + let repository = manmeta.repository.as_ref().map(String::as_ref); + + SerializedPackage { + name: &*package_id.name(), + version: &package_id.version().to_string(), + id: package_id, + license, + license_file, + description, + source: summary.source_id(), + dependencies: summary.dependencies(), + targets: self.manifest.targets(), + features: summary.features(), + manifest_path: &self.manifest_path.display().to_string(), + metadata: self.manifest.custom_metadata(), + authors, + categories, + keywords, + readme, + repository, + }.serialize(s) + } +} + +impl Package { + /// Create a package from a manifest and its location + pub fn new(manifest: Manifest, manifest_path: &Path) -> Package { + Package { + manifest, + manifest_path: manifest_path.to_path_buf(), + } + } + + /// Get the manifest dependencies + pub fn dependencies(&self) -> &[Dependency] { + self.manifest.dependencies() + } + /// Get the manifest + pub fn manifest(&self) -> &Manifest { + &self.manifest + } + /// Get the path to the manifest + pub fn manifest_path(&self) -> &Path { + &self.manifest_path + } + /// Get the name of the package + pub fn name(&self) -> InternedString { + self.package_id().name() + } + /// Get the PackageId object for the package (fully defines a package) + pub fn package_id(&self) -> &PackageId { + self.manifest.package_id() + } + /// Get the root folder of the package + pub fn root(&self) -> &Path { + self.manifest_path.parent().unwrap() + } + /// Get the summary for the package + pub fn summary(&self) -> &Summary { + self.manifest.summary() + } + /// Get the targets specified in the manifest + pub fn targets(&self) -> &[Target] { + self.manifest.targets() + } + /// Get the current package version + pub fn version(&self) -> &Version { + self.package_id().version() + } + /// Get the package authors + pub fn authors(&self) -> &Vec { + &self.manifest.metadata().authors + } + /// Whether the package is set to publish + pub fn publish(&self) -> &Option> { + self.manifest.publish() + } + + /// Whether the package uses a custom build script for any target + pub fn has_custom_build(&self) -> bool { + self.targets().iter().any(|t| t.is_custom_build()) + } + + pub fn find_closest_target( + &self, + target: &str, + is_expected_kind: fn(&Target) -> bool, + ) -> Option<&Target> { + let targets = self.targets(); + + let matches = targets + .iter() + .filter(|t| is_expected_kind(t)) + 
.map(|t| (lev_distance(target, t.name()), t)) + .filter(|&(d, _)| d < 4); + matches.min_by_key(|t| t.0).map(|t| t.1) + } + + pub fn map_source(self, to_replace: &SourceId, replace_with: &SourceId) -> Package { + Package { + manifest: self.manifest.map_source(to_replace, replace_with), + manifest_path: self.manifest_path, + } + } + + pub fn to_registry_toml(&self, config: &Config) -> CargoResult { + let manifest = self.manifest().original().prepare_for_publish(config)?; + let toml = toml::to_string(&manifest)?; + Ok(format!( + "\ + # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO\n\ + #\n\ + # When uploading crates to the registry Cargo will automatically\n\ + # \"normalize\" Cargo.toml files for maximal compatibility\n\ + # with all versions of Cargo and also rewrite `path` dependencies\n\ + # to registry (e.g. crates.io) dependencies\n\ + #\n\ + # If you believe there's an error in this file please file an\n\ + # issue against the rust-lang/cargo repository. If you're\n\ + # editing this file be aware that the upstream Cargo.toml\n\ + # will likely look very different (and much more reasonable)\n\ + \n\ + {}\ + ", + toml + )) + } +} + +impl fmt::Display for Package { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.summary().package_id()) + } +} + +impl PartialEq for Package { + fn eq(&self, other: &Package) -> bool { + self.package_id() == other.package_id() + } +} + +impl Eq for Package {} + +impl hash::Hash for Package { + fn hash(&self, into: &mut H) { + self.package_id().hash(into) + } +} + +#[derive(Debug)] +pub struct PackageSet<'cfg> { + packages: HashMap>, + sources: RefCell>, +} + +impl<'cfg> PackageSet<'cfg> { + pub fn new(package_ids: &[PackageId], sources: SourceMap<'cfg>) -> PackageSet<'cfg> { + PackageSet { + packages: package_ids + .iter() + .map(|id| (id.clone(), LazyCell::new())) + .collect(), + sources: RefCell::new(sources), + } + } + + pub fn package_ids<'a>(&'a self) -> Box + 'a> { + Box::new(self.packages.keys()) + } + + pub fn get(&self, id: &PackageId) -> CargoResult<&Package> { + let slot = self.packages + .get(id) + .ok_or_else(|| internal(format!("couldn't find `{}` in package set", id)))?; + if let Some(pkg) = slot.borrow() { + return Ok(pkg); + } + let mut sources = self.sources.borrow_mut(); + let source = sources + .get_mut(id.source_id()) + .ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?; + let pkg = source + .download(id) + .chain_err(|| format_err!("unable to get packages from source"))?; + assert!(slot.fill(pkg).is_ok()); + Ok(slot.borrow().unwrap()) + } + + pub fn sources(&self) -> Ref> { + self.sources.borrow() + } +} diff --git a/src/cargo/core/package_id.rs b/src/cargo/core/package_id.rs new file mode 100644 index 000000000..7bb64e8a1 --- /dev/null +++ b/src/cargo/core/package_id.rs @@ -0,0 +1,198 @@ +use std::cmp::Ordering; +use std::fmt::{self, Formatter}; +use std::hash::Hash; +use std::hash; +use std::path::Path; +use std::sync::Arc; + +use semver; +use serde::de; +use serde::ser; + +use util::{CargoResult, ToSemver}; +use core::source::SourceId; +use core::interning::InternedString; + +/// Identifier for a specific version of a package in a specific source. 
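The `Serialize`/`Deserialize` impls below encode a `PackageId` as the string `name version (source-url)`, the same shape seen in `Cargo.lock`. A standalone sketch of that decoding (the function name is made up for illustration):

    // Splits "name version (source-url)" into its three components.
    fn split_package_id(s: &str) -> Option<(&str, &str, &str)> {
        let mut parts = s.splitn(3, ' ');
        let name = parts.next()?;
        let version = parts.next()?;
        let url = parts.next()?;
        // The URL component is wrapped in parentheses.
        if url.starts_with('(') && url.ends_with(')') {
            Some((name, version, &url[1..url.len() - 1]))
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(
            split_package_id("foo 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)"),
            Some(("foo", "1.0.0", "registry+https://github.com/rust-lang/crates.io-index"))
        );
    }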
+#[derive(Clone)] +pub struct PackageId { + inner: Arc, +} + +#[derive(PartialEq, PartialOrd, Eq, Ord)] +struct PackageIdInner { + name: InternedString, + version: semver::Version, + source_id: SourceId, +} + +impl ser::Serialize for PackageId { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + s.collect_str(&format_args!( + "{} {} ({})", + self.inner.name, + self.inner.version, + self.inner.source_id.to_url() + )) + } +} + +impl<'de> de::Deserialize<'de> for PackageId { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + let mut s = string.splitn(3, ' '); + let name = s.next().unwrap(); + let version = match s.next() { + Some(s) => s, + None => return Err(de::Error::custom("invalid serialized PackageId")), + }; + let version = semver::Version::parse(version).map_err(de::Error::custom)?; + let url = match s.next() { + Some(s) => s, + None => return Err(de::Error::custom("invalid serialized PackageId")), + }; + let url = if url.starts_with('(') && url.ends_with(')') { + &url[1..url.len() - 1] + } else { + return Err(de::Error::custom("invalid serialized PackageId")); + }; + let source_id = SourceId::from_url(url).map_err(de::Error::custom)?; + + Ok(PackageId { + inner: Arc::new(PackageIdInner { + name: InternedString::new(name), + version, + source_id, + }), + }) + } +} + +impl Hash for PackageId { + fn hash(&self, state: &mut S) { + self.inner.name.hash(state); + self.inner.version.hash(state); + self.inner.source_id.hash(state); + } +} + +impl PartialEq for PackageId { + fn eq(&self, other: &PackageId) -> bool { + (*self.inner).eq(&*other.inner) + } +} +impl PartialOrd for PackageId { + fn partial_cmp(&self, other: &PackageId) -> Option { + (*self.inner).partial_cmp(&*other.inner) + } +} +impl Eq for PackageId {} +impl Ord for PackageId { + fn cmp(&self, other: &PackageId) -> Ordering { + (*self.inner).cmp(&*other.inner) + } +} + +impl PackageId { + pub fn new(name: &str, version: T, sid: &SourceId) -> CargoResult { + let v = version.to_semver()?; + Ok(PackageId { + inner: Arc::new(PackageIdInner { + name: InternedString::new(name), + version: v, + source_id: sid.clone(), + }), + }) + } + + pub fn name(&self) -> InternedString { + self.inner.name + } + pub fn version(&self) -> &semver::Version { + &self.inner.version + } + pub fn source_id(&self) -> &SourceId { + &self.inner.source_id + } + + pub fn with_precise(&self, precise: Option) -> PackageId { + PackageId { + inner: Arc::new(PackageIdInner { + name: self.inner.name, + version: self.inner.version.clone(), + source_id: self.inner.source_id.with_precise(precise), + }), + } + } + + pub fn with_source_id(&self, source: &SourceId) -> PackageId { + PackageId { + inner: Arc::new(PackageIdInner { + name: self.inner.name, + version: self.inner.version.clone(), + source_id: source.clone(), + }), + } + } + + pub fn stable_hash<'a>(&'a self, workspace: &'a Path) -> PackageIdStableHash<'a> { + PackageIdStableHash(self, workspace) + } +} + +pub struct PackageIdStableHash<'a>(&'a PackageId, &'a Path); + +impl<'a> Hash for PackageIdStableHash<'a> { + fn hash(&self, state: &mut S) { + self.0.inner.name.hash(state); + self.0.inner.version.hash(state); + self.0.inner.source_id.stable_hash(self.1, state); + } +} + +impl fmt::Display for PackageId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{} v{}", self.inner.name, self.inner.version)?; + + if !self.inner.source_id.is_default_registry() { + write!(f, " ({})", self.inner.source_id)?; + } + + 
Ok(()) + } +} + +impl fmt::Debug for PackageId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + f.debug_struct("PackageId") + .field("name", &self.inner.name) + .field("version", &self.inner.version.to_string()) + .field("source", &self.inner.source_id.to_string()) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::PackageId; + use core::source::SourceId; + use sources::CRATES_IO; + use util::ToUrl; + + #[test] + fn invalid_version_handled_nicely() { + let loc = CRATES_IO.to_url().unwrap(); + let repo = SourceId::for_registry(&loc).unwrap(); + + assert!(PackageId::new("foo", "1.0", &repo).is_err()); + assert!(PackageId::new("foo", "1", &repo).is_err()); + assert!(PackageId::new("foo", "bar", &repo).is_err()); + assert!(PackageId::new("foo", "", &repo).is_err()); + } +} diff --git a/src/cargo/core/package_id_spec.rs b/src/cargo/core/package_id_spec.rs new file mode 100644 index 000000000..43dd9683d --- /dev/null +++ b/src/cargo/core/package_id_spec.rs @@ -0,0 +1,378 @@ +use std::collections::HashMap; +use std::fmt; + +use semver::Version; +use serde::{de, ser}; +use url::Url; + +use core::PackageId; +use util::{ToSemver, ToUrl}; +use util::errors::{CargoResult, CargoResultExt}; + +/// Some or all of the data required to identify a package: +/// +/// 1. the package name (a `String`, required) +/// 2. the package version (a `Version`, optional) +/// 3. the package source (a `Url`, optional) +/// +/// If any of the optional fields are omitted, then the package id may be ambiguous, there may be +/// more than one package/version/url combo that will match. However, often just the name is +/// sufficient to uniquely define a package id. +#[derive(Clone, PartialEq, Eq, Debug, Hash, Ord, PartialOrd)] +pub struct PackageIdSpec { + name: String, + version: Option, + url: Option, +} + +impl PackageIdSpec { + /// Parses a spec string and returns a `PackageIdSpec` if the string was valid. + /// + /// # Examples + /// Some examples of valid strings + /// + /// ``` + /// use cargo::core::PackageIdSpec; + /// + /// let specs = vec![ + /// "http://crates.io/foo#1.2.3", + /// "http://crates.io/foo#bar:1.2.3", + /// "crates.io/foo", + /// "crates.io/foo#1.2.3", + /// "crates.io/foo#bar", + /// "crates.io/foo#bar:1.2.3", + /// "foo", + /// "foo:1.2.3", + /// ]; + /// for spec in specs { + /// assert!(PackageIdSpec::parse(spec).is_ok()); + /// } + pub fn parse(spec: &str) -> CargoResult { + if spec.contains('/') { + if let Ok(url) = spec.to_url() { + return PackageIdSpec::from_url(url); + } + if !spec.contains("://") { + if let Ok(url) = Url::parse(&format!("cargo://{}", spec)) { + return PackageIdSpec::from_url(url); + } + } + } + let mut parts = spec.splitn(2, ':'); + let name = parts.next().unwrap(); + let version = match parts.next() { + Some(version) => Some(Version::parse(version)?), + None => None, + }; + for ch in name.chars() { + if !ch.is_alphanumeric() && ch != '_' && ch != '-' { + bail!("invalid character in pkgid `{}`: `{}`", spec, ch) + } + } + Ok(PackageIdSpec { + name: name.to_string(), + version, + url: None, + }) + } + + /// Roughly equivalent to `PackageIdSpec::parse(spec)?.query(i)` + pub fn query_str<'a, I>(spec: &str, i: I) -> CargoResult<&'a PackageId> + where + I: IntoIterator, + { + let spec = PackageIdSpec::parse(spec) + .chain_err(|| format_err!("invalid package id specification: `{}`", spec))?; + spec.query(i) + } + + /// Convert a `PackageId` to a `PackageIdSpec`, which will have both the `Version` and `Url` + /// fields filled in. 
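For a quick feel of the accessors defined later in this file, a usage sketch in the spirit of the doc-test above; it assumes the `cargo` crate is available as a library dependency:

    extern crate cargo;

    use cargo::core::PackageIdSpec;

    fn main() {
        let spec = PackageIdSpec::parse("crates.io/foo#bar:1.2.3").unwrap();
        assert_eq!(spec.name(), "bar");
        assert_eq!(spec.version().map(|v| v.to_string()), Some("1.2.3".to_string()));
        assert!(spec.url().is_some());
    }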
+ pub fn from_package_id(package_id: &PackageId) -> PackageIdSpec { + PackageIdSpec { + name: package_id.name().to_string(), + version: Some(package_id.version().clone()), + url: Some(package_id.source_id().url().clone()), + } + } + + /// Tries to convert a valid `Url` to a `PackageIdSpec`. + fn from_url(mut url: Url) -> CargoResult { + if url.query().is_some() { + bail!("cannot have a query string in a pkgid: {}", url) + } + let frag = url.fragment().map(|s| s.to_owned()); + url.set_fragment(None); + let (name, version) = { + let mut path = url.path_segments() + .ok_or_else(|| format_err!("pkgid urls must have a path: {}", url))?; + let path_name = path.next_back().ok_or_else(|| { + format_err!( + "pkgid urls must have at least one path \ + component: {}", + url + ) + })?; + match frag { + Some(fragment) => { + let mut parts = fragment.splitn(2, ':'); + let name_or_version = parts.next().unwrap(); + match parts.next() { + Some(part) => { + let version = part.to_semver()?; + (name_or_version.to_string(), Some(version)) + } + None => { + if name_or_version.chars().next().unwrap().is_alphabetic() { + (name_or_version.to_string(), None) + } else { + let version = name_or_version.to_semver()?; + (path_name.to_string(), Some(version)) + } + } + } + } + None => (path_name.to_string(), None), + } + }; + Ok(PackageIdSpec { + name, + version, + url: Some(url), + }) + } + + pub fn name(&self) -> &str { + &self.name + } + + pub fn version(&self) -> Option<&Version> { + self.version.as_ref() + } + + pub fn url(&self) -> Option<&Url> { + self.url.as_ref() + } + + pub fn set_url(&mut self, url: Url) { + self.url = Some(url); + } + + /// Checkes whether the given `PackageId` matches the `PackageIdSpec`. + pub fn matches(&self, package_id: &PackageId) -> bool { + if self.name() != &*package_id.name() { + return false; + } + + if let Some(ref v) = self.version { + if v != package_id.version() { + return false; + } + } + + match self.url { + Some(ref u) => u == package_id.source_id().url(), + None => true, + } + } + + /// Checks a list of `PackageId`s to find 1 that matches this `PackageIdSpec`. If 0, 2, or + /// more are found, then this returns an error. 
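The contract described above, exactly one match or an error, can be sketched generically without cargo's types (the helper name is hypothetical):

    // Returns the unique item passing `matches`, or an error for 0 or 2+ hits.
    fn query_one<T, I, F>(items: I, matches: F) -> Result<T, String>
    where
        I: IntoIterator<Item = T>,
        F: Fn(&T) -> bool,
    {
        let mut it = items.into_iter().filter(|t| matches(t));
        match (it.next(), it.next()) {
            (Some(one), None) => Ok(one),
            (None, _) => Err("matched no packages".to_string()),
            (Some(_), Some(_)) => Err("specification is ambiguous".to_string()),
        }
    }

    fn main() {
        let names = vec!["foo", "bar", "baz"];
        assert_eq!(query_one(names, |n| n.starts_with("f")), Ok("foo"));
    }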
+ pub fn query<'a, I>(&self, i: I) -> CargoResult<&'a PackageId> + where + I: IntoIterator, + { + let mut ids = i.into_iter().filter(|p| self.matches(*p)); + let ret = match ids.next() { + Some(id) => id, + None => bail!( + "package id specification `{}` \ + matched no packages", + self + ), + }; + return match ids.next() { + Some(other) => { + let mut msg = format!( + "There are multiple `{}` packages in \ + your project, and the specification \ + `{}` is ambiguous.\n\ + Please re-run this command \ + with `-p ` where `` is one \ + of the following:", + self.name(), + self + ); + let mut vec = vec![ret, other]; + vec.extend(ids); + minimize(&mut msg, &vec, self); + Err(format_err!("{}", msg)) + } + None => Ok(ret), + }; + + fn minimize(msg: &mut String, ids: &[&PackageId], spec: &PackageIdSpec) { + let mut version_cnt = HashMap::new(); + for id in ids { + *version_cnt.entry(id.version()).or_insert(0) += 1; + } + for id in ids { + if version_cnt[id.version()] == 1 { + msg.push_str(&format!("\n {}:{}", spec.name(), id.version())); + } else { + msg.push_str(&format!("\n {}", PackageIdSpec::from_package_id(*id))); + } + } + } + } +} + +impl fmt::Display for PackageIdSpec { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut printed_name = false; + match self.url { + Some(ref url) => { + if url.scheme() == "cargo" { + write!(f, "{}{}", url.host().unwrap(), url.path())?; + } else { + write!(f, "{}", url)?; + } + if url.path_segments().unwrap().next_back().unwrap() != self.name { + printed_name = true; + write!(f, "#{}", self.name)?; + } + } + None => { + printed_name = true; + write!(f, "{}", self.name)? + } + } + if let Some(ref v) = self.version { + write!(f, "{}{}", if printed_name { ":" } else { "#" }, v)?; + } + Ok(()) + } +} + +impl ser::Serialize for PackageIdSpec { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + self.to_string().serialize(s) + } +} + +impl<'de> de::Deserialize<'de> for PackageIdSpec { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + PackageIdSpec::parse(&string).map_err(de::Error::custom) + } +} + +#[cfg(test)] +mod tests { + use core::{PackageId, SourceId}; + use super::PackageIdSpec; + use url::Url; + use semver::Version; + + #[test] + fn good_parsing() { + fn ok(spec: &str, expected: PackageIdSpec) { + let parsed = PackageIdSpec::parse(spec).unwrap(); + assert_eq!(parsed, expected); + assert_eq!(parsed.to_string(), spec); + } + + ok( + "http://crates.io/foo#1.2.3", + PackageIdSpec { + name: "foo".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("http://crates.io/foo").unwrap()), + }, + ); + ok( + "http://crates.io/foo#bar:1.2.3", + PackageIdSpec { + name: "bar".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("http://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo", + PackageIdSpec { + name: "foo".to_string(), + version: None, + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo#1.2.3", + PackageIdSpec { + name: "foo".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo#bar", + PackageIdSpec { + name: "bar".to_string(), + version: None, + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo#bar:1.2.3", + PackageIdSpec { + name: "bar".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + 
url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "foo", + PackageIdSpec { + name: "foo".to_string(), + version: None, + url: None, + }, + ); + ok( + "foo:1.2.3", + PackageIdSpec { + name: "foo".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: None, + }, + ); + } + + #[test] + fn bad_parsing() { + assert!(PackageIdSpec::parse("baz:").is_err()); + assert!(PackageIdSpec::parse("baz:*").is_err()); + assert!(PackageIdSpec::parse("baz:1.0").is_err()); + assert!(PackageIdSpec::parse("http://baz:1.0").is_err()); + assert!(PackageIdSpec::parse("http://#baz:1.0").is_err()); + } + + #[test] + fn matching() { + let url = Url::parse("http://example.com").unwrap(); + let sid = SourceId::for_registry(&url).unwrap(); + let foo = PackageId::new("foo", "1.2.3", &sid).unwrap(); + let bar = PackageId::new("bar", "1.2.3", &sid).unwrap(); + + assert!(PackageIdSpec::parse("foo").unwrap().matches(&foo)); + assert!(!PackageIdSpec::parse("foo").unwrap().matches(&bar)); + assert!(PackageIdSpec::parse("foo:1.2.3").unwrap().matches(&foo)); + assert!(!PackageIdSpec::parse("foo:1.2.2").unwrap().matches(&foo)); + } +} diff --git a/src/cargo/core/profiles.rs b/src/cargo/core/profiles.rs new file mode 100644 index 000000000..a83ed755c --- /dev/null +++ b/src/cargo/core/profiles.rs @@ -0,0 +1,485 @@ +use std::collections::HashSet; +use std::{cmp, fmt, hash}; + +use core::compiler::CompileMode; +use core::interning::InternedString; +use core::{PackageId, PackageIdSpec, PackageSet, Shell}; +use util::lev_distance::lev_distance; +use util::toml::{ProfilePackageSpec, StringOrBool, TomlProfile, U32OrBool}; +use util::CargoResult; + +/// Collection of all user profiles. +#[derive(Clone, Debug)] +pub struct Profiles { + dev: ProfileMaker, + release: ProfileMaker, + test: ProfileMaker, + bench: ProfileMaker, + doc: ProfileMaker, +} + +impl Profiles { + pub fn new( + dev: Option, + release: Option, + test: Option, + bench: Option, + doc: Option, + ) -> Profiles { + Profiles { + dev: ProfileMaker { + default: Profile::default_dev(), + toml: dev, + }, + release: ProfileMaker { + default: Profile::default_release(), + toml: release, + }, + test: ProfileMaker { + default: Profile::default_test(), + toml: test, + }, + bench: ProfileMaker { + default: Profile::default_bench(), + toml: bench, + }, + doc: ProfileMaker { + default: Profile::default_doc(), + toml: doc, + }, + } + } + + /// Retrieve the profile for a target. + /// `is_member` is whether or not this package is a member of the + /// workspace. + pub fn get_profile( + &self, + pkg_id: &PackageId, + is_member: bool, + profile_for: ProfileFor, + mode: CompileMode, + release: bool, + ) -> Profile { + let maker = match mode { + CompileMode::Test => { + if release { + &self.bench + } else { + &self.test + } + } + CompileMode::Build + | CompileMode::Check { .. } + | CompileMode::Doctest + | CompileMode::RunCustomBuild => { + // Note: RunCustomBuild doesn't normally use this code path. + // `build_unit_profiles` normally ensures that it selects the + // ancestor's profile. However `cargo clean -p` can hit this + // path. + if release { + &self.release + } else { + &self.dev + } + } + CompileMode::Bench => &self.bench, + CompileMode::Doc { .. } => &self.doc, + }; + let mut profile = maker.profile_for(Some(pkg_id), is_member, profile_for); + // `panic` should not be set for tests/benches, or any of their + // dependencies. 
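For orientation, the `match` above boils down to a small mode/flag table. An equivalent standalone sketch (the enum is a simplified stand-in for the real `CompileMode`):

    enum Mode { Build, Test, Bench, Doc }

    // Which base profile a unit gets, per the match above (simplified).
    fn profile_name(mode: Mode, release: bool) -> &'static str {
        match (mode, release) {
            (Mode::Test, false) => "test",
            (Mode::Test, true) => "bench", // `cargo test --release` uses the bench profile
            (Mode::Bench, _) => "bench",
            (Mode::Doc, _) => "doc",
            (Mode::Build, true) => "release",
            (Mode::Build, false) => "dev",
        }
    }

    fn main() {
        assert_eq!(profile_name(Mode::Test, true), "bench");
    }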
+ if profile_for == ProfileFor::TestDependency || mode.is_any_test() { + profile.panic = None; + } + profile + } + + /// The profile for *running* a `build.rs` script is only used for setting + /// a few environment variables. To ensure proper de-duplication of the + /// running `Unit`, this uses a stripped-down profile (so that unrelated + /// profile flags don't cause `build.rs` to needlessly run multiple + /// times). + pub fn get_profile_run_custom_build(&self, for_unit_profile: &Profile) -> Profile { + let mut result = Profile::default(); + result.debuginfo = for_unit_profile.debuginfo; + result.opt_level = for_unit_profile.opt_level; + result + } + + /// This returns a generic base profile. This is currently used for the + /// `[Finished]` line. It is not entirely accurate, since it doesn't + /// select for the package that was actually built. + pub fn base_profile(&self, release: bool) -> Profile { + if release { + self.release.profile_for(None, true, ProfileFor::Any) + } else { + self.dev.profile_for(None, true, ProfileFor::Any) + } + } + + /// Used to check for overrides for non-existing packages. + pub fn validate_packages(&self, shell: &mut Shell, packages: &PackageSet) -> CargoResult<()> { + self.dev.validate_packages(shell, packages)?; + self.release.validate_packages(shell, packages)?; + self.test.validate_packages(shell, packages)?; + self.bench.validate_packages(shell, packages)?; + self.doc.validate_packages(shell, packages)?; + Ok(()) + } +} + +/// An object used for handling the profile override hierarchy. +/// +/// The precedence of profiles are (first one wins): +/// - [profile.dev.overrides.name] - A named package. +/// - [profile.dev.overrides."*"] - This cannot apply to workspace members. +/// - [profile.dev.build-override] - This can only apply to `build.rs` scripts +/// and their dependencies. +/// - [profile.dev] +/// - Default (hard-coded) values. +#[derive(Debug, Clone)] +struct ProfileMaker { + default: Profile, + toml: Option, +} + +impl ProfileMaker { + fn profile_for( + &self, + pkg_id: Option<&PackageId>, + is_member: bool, + profile_for: ProfileFor, + ) -> Profile { + let mut profile = self.default; + if let Some(ref toml) = self.toml { + merge_profile(&mut profile, toml); + if profile_for == ProfileFor::CustomBuild { + if let Some(ref build_override) = toml.build_override { + merge_profile(&mut profile, build_override); + } + } + if let Some(ref overrides) = toml.overrides { + if !is_member { + if let Some(all) = overrides.get(&ProfilePackageSpec::All) { + merge_profile(&mut profile, all); + } + } + if let Some(pkg_id) = pkg_id { + let mut matches = overrides.iter().filter_map( + |(key, spec_profile)| match key { + &ProfilePackageSpec::All => None, + &ProfilePackageSpec::Spec(ref s) => if s.matches(pkg_id) { + Some(spec_profile) + } else { + None + }, + }, + ); + if let Some(spec_profile) = matches.next() { + merge_profile(&mut profile, spec_profile); + // `validate_packages` should ensure that there are + // no additional matches. + assert!( + matches.next().is_none(), + "package `{}` matched multiple profile overrides", + pkg_id + ); + } + } + } + } + profile + } + + fn validate_packages(&self, shell: &mut Shell, packages: &PackageSet) -> CargoResult<()> { + let toml = match self.toml { + Some(ref toml) => toml, + None => return Ok(()), + }; + let overrides = match toml.overrides { + Some(ref overrides) => overrides, + None => return Ok(()), + }; + // Verify that a package doesn't match multiple spec overrides. 
+ let mut found = HashSet::new(); + for pkg_id in packages.package_ids() { + let matches: Vec<&PackageIdSpec> = overrides + .keys() + .filter_map(|key| match key { + &ProfilePackageSpec::All => None, + &ProfilePackageSpec::Spec(ref spec) => if spec.matches(pkg_id) { + Some(spec) + } else { + None + }, + }) + .collect(); + match matches.len() { + 0 => {} + 1 => { + found.insert(matches[0].clone()); + } + _ => { + let specs = matches + .iter() + .map(|spec| spec.to_string()) + .collect::>() + .join(", "); + bail!( + "multiple profile overrides in profile `{}` match package `{}`\n\ + found profile override specs: {}", + self.default.name, + pkg_id, + specs + ); + } + } + } + + // Verify every override matches at least one package. + let missing_specs = overrides.keys().filter_map(|key| { + if let &ProfilePackageSpec::Spec(ref spec) = key { + if !found.contains(spec) { + return Some(spec); + } + } + None + }); + for spec in missing_specs { + // See if there is an exact name match. + let name_matches: Vec = packages + .package_ids() + .filter_map(|pkg_id| { + if pkg_id.name().as_str() == spec.name() { + Some(pkg_id.to_string()) + } else { + None + } + }) + .collect(); + if name_matches.len() == 0 { + let suggestion = packages + .package_ids() + .map(|p| (lev_distance(spec.name(), &p.name()), p.name())) + .filter(|&(d, _)| d < 4) + .min_by_key(|p| p.0) + .map(|p| p.1); + match suggestion { + Some(p) => shell.warn(format!( + "profile override spec `{}` did not match any packages\n\n\ + Did you mean `{}`?", + spec, p + ))?, + None => shell.warn(format!( + "profile override spec `{}` did not match any packages", + spec + ))?, + } + } else { + shell.warn(format!( + "version or URL in profile override spec `{}` does not \ + match any of the packages: {}", + spec, + name_matches.join(", ") + ))?; + } + } + Ok(()) + } +} + +fn merge_profile(profile: &mut Profile, toml: &TomlProfile) { + if let Some(ref opt_level) = toml.opt_level { + profile.opt_level = InternedString::new(&opt_level.0); + } + match toml.lto { + Some(StringOrBool::Bool(b)) => profile.lto = Lto::Bool(b), + Some(StringOrBool::String(ref n)) => profile.lto = Lto::Named(InternedString::new(n)), + None => {} + } + if toml.codegen_units.is_some() { + profile.codegen_units = toml.codegen_units; + } + match toml.debug { + Some(U32OrBool::U32(debug)) => profile.debuginfo = Some(debug), + Some(U32OrBool::Bool(true)) => profile.debuginfo = Some(2), + Some(U32OrBool::Bool(false)) => profile.debuginfo = None, + None => {} + } + if let Some(debug_assertions) = toml.debug_assertions { + profile.debug_assertions = debug_assertions; + } + if let Some(rpath) = toml.rpath { + profile.rpath = rpath; + } + if let Some(ref panic) = toml.panic { + profile.panic = Some(InternedString::new(panic)); + } + if let Some(overflow_checks) = toml.overflow_checks { + profile.overflow_checks = overflow_checks; + } + if let Some(incremental) = toml.incremental { + profile.incremental = incremental; + } +} + +/// Profile settings used to determine which compiler flags to use for a +/// target. 
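To make `merge_profile` above concrete: manifest values such as `[profile.release] lto = "thin"` are layered over the hard-coded defaults in the `Profile` struct below. A reduced standalone sketch of that layering (field set trimmed, names illustrative):

    struct Prof { opt_level: String, debuginfo: Option<u32> }

    struct TomlProf { opt_level: Option<String>, debug: Option<bool> }

    // Each manifest value, when present, overrides the default.
    fn merge(p: &mut Prof, t: &TomlProf) {
        if let Some(ref o) = t.opt_level {
            p.opt_level = o.clone();
        }
        match t.debug {
            Some(true) => p.debuginfo = Some(2), // `debug = true` means full debuginfo
            Some(false) => p.debuginfo = None,
            None => {}                           // keep the default untouched
        }
    }

    fn main() {
        let mut p = Prof { opt_level: "0".to_string(), debuginfo: None };
        merge(&mut p, &TomlProf { opt_level: Some("3".to_string()), debug: Some(true) });
        assert_eq!(p.opt_level, "3");
        assert_eq!(p.debuginfo, Some(2));
    }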
+#[derive(Debug, Clone, Copy, Eq)] +pub struct Profile { + pub name: &'static str, + pub opt_level: InternedString, + pub lto: Lto, + // None = use rustc default + pub codegen_units: Option, + pub debuginfo: Option, + pub debug_assertions: bool, + pub overflow_checks: bool, + pub rpath: bool, + pub incremental: bool, + pub panic: Option, +} + +impl Default for Profile { + fn default() -> Profile { + Profile { + name: "", + opt_level: InternedString::new("0"), + lto: Lto::Bool(false), + codegen_units: None, + debuginfo: None, + debug_assertions: false, + overflow_checks: false, + rpath: false, + incremental: false, + panic: None, + } + } +} + +impl fmt::Display for Profile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Profile({})", self.name) + } +} + +impl hash::Hash for Profile { + fn hash(&self, state: &mut H) + where + H: hash::Hasher, + { + self.comparable().hash(state); + } +} + +impl cmp::PartialEq for Profile { + fn eq(&self, other: &Self) -> bool { + self.comparable() == other.comparable() + } +} + +impl Profile { + fn default_dev() -> Profile { + Profile { + name: "dev", + debuginfo: Some(2), + debug_assertions: true, + overflow_checks: true, + incremental: true, + ..Profile::default() + } + } + + fn default_release() -> Profile { + Profile { + name: "release", + opt_level: InternedString::new("3"), + ..Profile::default() + } + } + + fn default_test() -> Profile { + Profile { + name: "test", + ..Profile::default_dev() + } + } + + fn default_bench() -> Profile { + Profile { + name: "bench", + ..Profile::default_release() + } + } + + fn default_doc() -> Profile { + Profile { + name: "doc", + ..Profile::default_dev() + } + } + + /// Compare all fields except `name`, which doesn't affect compilation. + /// This is necessary for `Unit` deduplication for things like "test" and + /// "dev" which are essentially the same. + fn comparable( + &self, + ) -> ( + &InternedString, + &Lto, + &Option, + &Option, + &bool, + &bool, + &bool, + &bool, + &Option, + ) { + ( + &self.opt_level, + &self.lto, + &self.codegen_units, + &self.debuginfo, + &self.debug_assertions, + &self.overflow_checks, + &self.rpath, + &self.incremental, + &self.panic, + ) + } +} + +/// The link-time-optimization setting. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +pub enum Lto { + /// False = no LTO + /// True = "Fat" LTO + Bool(bool), + /// Named LTO settings like "thin". + Named(InternedString), +} + +/// A flag used in `Unit` to indicate the purpose for the target. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +pub enum ProfileFor { + /// A general-purpose target. + Any, + /// A target for `build.rs` or any of its dependencies. This enables + /// `build-override` profiles for these targets. + CustomBuild, + /// A target that is a dependency of a test or benchmark. Currently this + /// enforces that the `panic` setting is not set. 
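The variant below exists because the test harness relies on unwinding to report failures, so `panic = "abort"` must not leak into tests or their dependencies. A sketch of that rule as a standalone check (helper name hypothetical):

    // Hypothetical validation mirroring the rule encoded by `TestDependency`.
    fn validate_panic_setting(panic: Option<&str>, is_test_dep: bool) -> Result<(), String> {
        match (panic, is_test_dep) {
            (Some("abort"), true) => Err(
                "`panic = \"abort\"` cannot be used on tests or their dependencies; \
                 the test harness needs unwinding to report failures".to_string(),
            ),
            _ => Ok(()),
        }
    }

    fn main() {
        assert!(validate_panic_setting(Some("abort"), true).is_err());
        assert!(validate_panic_setting(Some("abort"), false).is_ok());
    }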
+ TestDependency, +} + +impl ProfileFor { + pub fn all_values() -> &'static [ProfileFor] { + static ALL: [ProfileFor; 3] = [ + ProfileFor::Any, + ProfileFor::CustomBuild, + ProfileFor::TestDependency, + ]; + &ALL + } +} diff --git a/src/cargo/core/registry.rs b/src/cargo/core/registry.rs new file mode 100644 index 000000000..904179c7a --- /dev/null +++ b/src/cargo/core/registry.rs @@ -0,0 +1,607 @@ +use std::collections::HashMap; + +use semver::VersionReq; +use url::Url; + +use core::{Dependency, PackageId, Source, SourceId, SourceMap, Summary}; +use core::PackageSet; +use util::{profile, Config}; +use util::errors::{CargoResult, CargoResultExt}; +use sources::config::SourceConfigMap; + +/// Source of information about a group of packages. +/// +/// See also `core::Source`. +pub trait Registry { + /// Attempt to find the packages that match a dependency request. + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()>; + + fn query_vec(&mut self, dep: &Dependency) -> CargoResult> { + let mut ret = Vec::new(); + self.query(dep, &mut |s| ret.push(s))?; + Ok(ret) + } +} + +/// This structure represents a registry of known packages. It internally +/// contains a number of `Box` instances which are used to load a +/// `Package` from. +/// +/// The resolution phase of Cargo uses this to drive knowledge about new +/// packages as well as querying for lists of new packages. It is here that +/// sources are updated (e.g. network operations) and overrides are +/// handled. +/// +/// The general idea behind this registry is that it is centered around the +/// `SourceMap` structure, contained within which is a mapping of a `SourceId` to +/// a `Source`. Each `Source` in the map has been updated (using network +/// operations if necessary) and is ready to be queried for packages. +pub struct PackageRegistry<'cfg> { + sources: SourceMap<'cfg>, + + // A list of sources which are considered "overrides" which take precedent + // when querying for packages. + overrides: Vec, + + // Note that each SourceId does not take into account its `precise` field + // when hashing or testing for equality. When adding a new `SourceId`, we + // want to avoid duplicates in the `SourceMap` (to prevent re-updating the + // same git repo twice for example), but we also want to ensure that the + // loaded source is always updated. + // + // Sources with a `precise` field normally don't need to be updated because + // their contents are already on disk, but sources without a `precise` field + // almost always need to be updated. If we have a cached `Source` for a + // precise `SourceId`, then when we add a new `SourceId` that is not precise + // we want to ensure that the underlying source is updated. + // + // This is basically a long-winded way of saying that we want to know + // precisely what the keys of `sources` are, so this is a mapping of key to + // what exactly the key is. 
+    source_ids: HashMap<SourceId, (SourceId, Kind)>,
+
+    locked: LockedMap,
+    source_config: SourceConfigMap<'cfg>,
+
+    patches: HashMap<Url, Vec<Summary>>,
+    patches_locked: bool,
+    patches_available: HashMap<Url, Vec<PackageId>>,
+}
+
+type LockedMap = HashMap<SourceId, HashMap<String, Vec<(PackageId, Vec<PackageId>)>>>;
+
+#[derive(PartialEq, Eq, Clone, Copy)]
+enum Kind {
+    Override,
+    Locked,
+    Normal,
+}
+
+impl<'cfg> PackageRegistry<'cfg> {
+    pub fn new(config: &'cfg Config) -> CargoResult<PackageRegistry<'cfg>> {
+        let source_config = SourceConfigMap::new(config)?;
+        Ok(PackageRegistry {
+            sources: SourceMap::new(),
+            source_ids: HashMap::new(),
+            overrides: Vec::new(),
+            source_config,
+            locked: HashMap::new(),
+            patches: HashMap::new(),
+            patches_locked: false,
+            patches_available: HashMap::new(),
+        })
+    }
+
+    pub fn get(self, package_ids: &[PackageId]) -> PackageSet<'cfg> {
+        trace!("getting packages; sources={}", self.sources.len());
+        PackageSet::new(package_ids, self.sources)
+    }
+
+    fn ensure_loaded(&mut self, namespace: &SourceId, kind: Kind) -> CargoResult<()> {
+        match self.source_ids.get(namespace) {
+            // We've previously loaded this source, and we've already locked it,
+            // so we're not allowed to change it even if `namespace` has a
+            // slightly different precise version listed.
+            Some(&(_, Kind::Locked)) => {
+                debug!("load/locked {}", namespace);
+                return Ok(());
+            }
+
+            // If the previous source was not a precise source, then we can be
+            // sure that it's already been updated if we've already loaded it.
+            Some(&(ref previous, _)) if previous.precise().is_none() => {
+                debug!("load/precise {}", namespace);
+                return Ok(());
+            }
+
+            // If the previous source has the same precise version as we do,
+            // then we're done, otherwise we need to move forward updating
+            // this source.
+            Some(&(ref previous, _)) => {
+                if previous.precise() == namespace.precise() {
+                    debug!("load/match {}", namespace);
+                    return Ok(());
+                }
+                debug!("load/mismatch {}", namespace);
+            }
+            None => {
+                debug!("load/missing {}", namespace);
+            }
+        }
+
+        self.load(namespace, kind)?;
+        Ok(())
+    }
+
+    pub fn add_sources(&mut self, ids: &[SourceId]) -> CargoResult<()> {
+        for id in ids.iter() {
+            self.ensure_loaded(id, Kind::Locked)?;
+        }
+        Ok(())
+    }
+
+    pub fn add_preloaded(&mut self, source: Box<Source + 'cfg>) {
+        self.add_source(source, Kind::Locked);
+    }
+
+    fn add_source(&mut self, source: Box<Source + 'cfg>, kind: Kind) {
+        let id = source.source_id().clone();
+        self.sources.insert(source);
+        self.source_ids.insert(id.clone(), (id, kind));
+    }
+
+    pub fn add_override(&mut self, source: Box<Source + 'cfg>) {
+        self.overrides.push(source.source_id().clone());
+        self.add_source(source, Kind::Override);
+    }
+
+    pub fn register_lock(&mut self, id: PackageId, deps: Vec<PackageId>) {
+        trace!("register_lock: {}", id);
+        for dep in deps.iter() {
+            trace!("\t-> {}", dep);
+        }
+        let sub_map = self.locked
+            .entry(id.source_id().clone())
+            .or_insert_with(HashMap::new);
+        let sub_vec = sub_map
+            .entry(id.name().to_string())
+            .or_insert_with(Vec::new);
+        sub_vec.push((id, deps));
+    }
+
+    /// Insert a `[patch]` section into this registry.
+    ///
+    /// This method will insert a `[patch]` section for the `url` specified,
+    /// with the given list of dependencies. The `url` specified is the URL of
+    /// the source to patch (for example this is `crates-io` in the manifest).
+    /// The `deps` is an array of all the entries in the `[patch]` section of
+    /// the manifest.
+    ///
+    /// Here the `deps` will be resolved to a precise version and stored
+    /// internally for future calls to `query` below. It's expected that `deps`
+    /// have had `lock_to` called already, if applicable (e.g. if a lock file was
+    /// already present).
+    ///
+    /// Note that the patch list specified here *will not* be available to
+    /// `query` until `lock_patches` is called below, which should be called
+    /// once all patches have been added.
+    pub fn patch(&mut self, url: &Url, deps: &[Dependency]) -> CargoResult<()> {
+        // First up we need to actually resolve each `deps` specification to
+        // precisely one summary. We're not using the `query` method below as it
+        // internally uses maps we're building up as part of this method
+        // (`patches_available` and `patches`). Instead we're going straight to
+        // the source to load information from it.
+        //
+        // Remember that each dependency listed in `[patch]` has to resolve to
+        // precisely one package, so that's why we're just creating a flat list
+        // of summaries which should be the same length as `deps` above.
+        let unlocked_summaries = deps.iter()
+            .map(|dep| {
+                debug!("registering a patch for `{}` with `{}`", url, dep.name());
+
+                // Go straight to the source for resolving `dep`. Load it as we
+                // normally would and then ask it directly for the list of summaries
+                // corresponding to this `dep`.
+                self.ensure_loaded(dep.source_id(), Kind::Normal)
+                    .chain_err(|| {
+                        format_err!(
+                            "failed to load source for a dependency \
+                             on `{}`",
+                            dep.name()
+                        )
+                    })?;
+
+                let mut summaries = self.sources
+                    .get_mut(dep.source_id())
+                    .expect("loaded source not present")
+                    .query_vec(dep)?
+                    .into_iter();
+
+                let summary = match summaries.next() {
+                    Some(summary) => summary,
+                    None => bail!(
+                        "patch for `{}` in `{}` did not resolve to any crates. If this is \
+                         unexpected, you may wish to consult: \
+                         https://github.com/rust-lang/cargo/issues/4678",
+                        dep.name(),
+                        url
+                    ),
+                };
+                if summaries.next().is_some() {
+                    bail!(
+                        "patch for `{}` in `{}` resolved to more than one candidate",
+                        dep.name(),
+                        url
+                    )
+                }
+                if summary.package_id().source_id().url() == url {
+                    bail!(
+                        "patch for `{}` in `{}` points to the same source, but \
+                         patches must point to different sources",
+                        dep.name(),
+                        url
+                    );
+                }
+                Ok(summary)
+            })
+            .collect::<CargoResult<Vec<_>>>()
+            .chain_err(|| format_err!("failed to resolve patches for `{}`", url))?;
+
+        // Note that we do not use `lock` here to lock summaries! That step
+        // happens later once `lock_patches` is invoked. In the meantime though
+        // we want to fill in the `patches_available` map (later used in the
+        // `lock` method) and otherwise store the unlocked summaries in
+        // `patches` to get locked in a future call to `lock_patches`.
+        let ids = unlocked_summaries
+            .iter()
+            .map(|s| s.package_id())
+            .cloned()
+            .collect();
+        self.patches_available.insert(url.clone(), ids);
+        self.patches.insert(url.clone(), unlocked_summaries);
+
+        Ok(())
+    }
+
+    /// Lock all patch summaries added via `patch`, making them available to
+    /// resolution via `query`.
+    ///
+    /// This function will internally `lock` each summary added via `patch`
+    /// above now that the full set of `patch` packages are known. This'll allow
+    /// us to correctly resolve overridden dependencies between patches
+    /// hopefully!
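For reference, the manifest side of this API: a `[patch]` section like the one below (URL illustrative) is what ultimately reaches `PackageRegistry::patch` as a `url` plus `deps` pair. A sketch that merely parses such a snippet with the `toml` crate:

    extern crate toml;

    fn main() {
        let manifest = r#"
            [patch.crates-io]
            foo = { git = "https://github.com/example/foo" }
        "#;
        let value: toml::Value = manifest.parse().unwrap();
        let patch = &value["patch"]["crates-io"]["foo"];
        assert_eq!(
            patch["git"].as_str(),
            Some("https://github.com/example/foo")
        );
    }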
+ pub fn lock_patches(&mut self) { + assert!(!self.patches_locked); + for summaries in self.patches.values_mut() { + for summary in summaries { + *summary = lock(&self.locked, &self.patches_available, summary.clone()); + } + } + self.patches_locked = true; + } + + pub fn patches(&self) -> &HashMap> { + &self.patches + } + + fn load(&mut self, source_id: &SourceId, kind: Kind) -> CargoResult<()> { + (|| { + let source = self.source_config.load(source_id)?; + assert_eq!(source.source_id(), source_id); + + if kind == Kind::Override { + self.overrides.push(source_id.clone()); + } + self.add_source(source, kind); + + // Ensure the source has fetched all necessary remote data. + let _p = profile::start(format!("updating: {}", source_id)); + self.sources.get_mut(source_id).unwrap().update() + })() + .chain_err(|| format_err!("Unable to update {}", source_id))?; + Ok(()) + } + + fn query_overrides(&mut self, dep: &Dependency) -> CargoResult> { + for s in self.overrides.iter() { + let src = self.sources.get_mut(s).unwrap(); + let dep = Dependency::new_override(&*dep.name(), s); + let mut results = src.query_vec(&dep)?; + if !results.is_empty() { + return Ok(Some(results.remove(0))); + } + } + Ok(None) + } + + /// This function is used to transform a summary to another locked summary + /// if possible. This is where the concept of a lockfile comes into play. + /// + /// If a summary points at a package id which was previously locked, then we + /// override the summary's id itself, as well as all dependencies, to be + /// rewritten to the locked versions. This will transform the summary's + /// source to a precise source (listed in the locked version) as well as + /// transforming all of the dependencies from range requirements on + /// imprecise sources to exact requirements on precise sources. + /// + /// If a summary does not point at a package id which was previously locked, + /// or if any dependencies were added and don't have a previously listed + /// version, we still want to avoid updating as many dependencies as + /// possible to keep the graph stable. In this case we map all of the + /// summary's dependencies to be rewritten to a locked version wherever + /// possible. If we're unable to map a dependency though, we just pass it on + /// through. + pub fn lock(&self, summary: Summary) -> Summary { + assert!(self.patches_locked); + lock(&self.locked, &self.patches_available, summary) + } + + fn warn_bad_override( + &self, + override_summary: &Summary, + real_summary: &Summary, + ) -> CargoResult<()> { + let mut real_deps = real_summary.dependencies().iter().collect::>(); + + let boilerplate = "\ +This is currently allowed but is known to produce buggy behavior with spurious +recompiles and changes to the crate graph. Path overrides unfortunately were +never intended to support this feature, so for now this message is just a +warning. In the future, however, this message will become a hard error. + +To change the dependency graph via an override it's recommended to use the +`[replace]` feature of Cargo instead of the path override feature. This is +documented online at the url below for more information. 
+ +http://doc.crates.io/specifying-dependencies.html#overriding-dependencies +"; + + for dep in override_summary.dependencies() { + if let Some(i) = real_deps.iter().position(|d| dep == *d) { + real_deps.remove(i); + continue; + } + let msg = format!( + "\ + path override for crate `{}` has altered the original list of\n\ + dependencies; the dependency on `{}` was either added or\n\ + modified to not match the previously resolved version\n\n\ + {}", + override_summary.package_id().name(), + dep.name(), + boilerplate + ); + self.source_config.config().shell().warn(&msg)?; + return Ok(()); + } + + if let Some(id) = real_deps.get(0) { + let msg = format!( + "\ + path override for crate `{}` has altered the original list of + dependencies; the dependency on `{}` was removed\n\n + {}", + override_summary.package_id().name(), + id.name(), + boilerplate + ); + self.source_config.config().shell().warn(&msg)?; + return Ok(()); + } + + Ok(()) + } +} + +impl<'cfg> Registry for PackageRegistry<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + assert!(self.patches_locked); + let (override_summary, n, to_warn) = { + // Look for an override and get ready to query the real source. + let override_summary = self.query_overrides(dep)?; + + // Next up on our list of candidates is to check the `[patch]` + // section of the manifest. Here we look through all patches + // relevant to the source that `dep` points to, and then we match + // name/version. Note that we don't use `dep.matches(..)` because + // the patches, by definition, come from a different source. + // This means that `dep.matches(..)` will always return false, when + // what we really care about is the name/version match. + let mut patches = Vec::::new(); + if let Some(extra) = self.patches.get(dep.source_id().url()) { + patches.extend( + extra + .iter() + .filter(|s| dep.matches_ignoring_source(s.package_id())) + .cloned(), + ); + } + + // A crucial feature of the `[patch]` feature is that we *don't* + // query the actual registry if we have a "locked" dependency. A + // locked dep basically just means a version constraint of `=a.b.c`, + // and because patches take priority over the actual source then if + // we have a candidate we're done. + if patches.len() == 1 && dep.is_locked() { + let patch = patches.remove(0); + match override_summary { + Some(summary) => (summary, 1, Some(patch)), + None => { + f(patch); + return Ok(()); + } + } + } else { + if !patches.is_empty() { + debug!( + "found {} patches with an unlocked dep on `{}` at {} \ + with `{}`, \ + looking at sources", + patches.len(), + dep.name(), + dep.source_id(), + dep.version_req() + ); + } + + // Ensure the requested source_id is loaded + self.ensure_loaded(dep.source_id(), Kind::Normal) + .chain_err(|| { + format_err!( + "failed to load source for a dependency \ + on `{}`", + dep.name() + ) + })?; + + let source = self.sources.get_mut(dep.source_id()); + match (override_summary, source) { + (Some(_), None) => bail!("override found but no real ones"), + (None, None) => return Ok(()), + + // If we don't have an override then we just ship + // everything upstairs after locking the summary + (None, Some(source)) => { + for patch in patches.iter() { + f(patch.clone()); + } + + // Our sources shouldn't ever come back to us with two + // summaries that have the same version. We could, + // however, have an `[patch]` section which is in use + // to override a version in the registry. 
This means + // that if our `summary` in this loop has the same + // version as something in `patches` that we've + // already selected, then we skip this `summary`. + let locked = &self.locked; + let all_patches = &self.patches_available; + return source.query(dep, &mut |summary| { + for patch in patches.iter() { + let patch = patch.package_id().version(); + if summary.package_id().version() == patch { + return; + } + } + f(lock(locked, all_patches, summary)) + }); + } + + // If we have an override summary then we query the source + // to sanity check its results. We don't actually use any of + // the summaries it gives us though. + (Some(override_summary), Some(source)) => { + if !patches.is_empty() { + bail!("found patches and a path override") + } + let mut n = 0; + let mut to_warn = None; + source.query(dep, &mut |summary| { + n += 1; + to_warn = Some(summary); + })?; + (override_summary, n, to_warn) + } + } + } + }; + + if n > 1 { + bail!("found an override with a non-locked list"); + } else if let Some(summary) = to_warn { + self.warn_bad_override(&override_summary, &summary)?; + } + f(self.lock(override_summary)); + Ok(()) + } +} + +fn lock(locked: &LockedMap, patches: &HashMap>, summary: Summary) -> Summary { + let pair = locked + .get(summary.source_id()) + .and_then(|map| map.get(&*summary.name())) + .and_then(|vec| vec.iter().find(|&&(ref id, _)| id == summary.package_id())); + + trace!("locking summary of {}", summary.package_id()); + + // Lock the summary's id if possible + let summary = match pair { + Some(&(ref precise, _)) => summary.override_id(precise.clone()), + None => summary, + }; + summary.map_dependencies(|dep| { + trace!("\t{}/{}/{}", dep.name(), dep.version_req(), dep.source_id()); + + // If we've got a known set of overrides for this summary, then + // one of a few cases can arise: + // + // 1. We have a lock entry for this dependency from the same + // source as it's listed as coming from. In this case we make + // sure to lock to precisely the given package id. + // + // 2. We have a lock entry for this dependency, but it's from a + // different source than what's listed, or the version + // requirement has changed. In this case we must discard the + // locked version because the dependency needs to be + // re-resolved. + // + // 3. We don't have a lock entry for this dependency, in which + // case it was likely an optional dependency which wasn't + // included previously so we just pass it through anyway. + // + // Cases 1/2 are handled by `matches_id` and case 3 is handled by + // falling through to the logic below. + if let Some(&(_, ref locked_deps)) = pair { + let locked = locked_deps.iter().find(|id| dep.matches_id(id)); + if let Some(locked) = locked { + trace!("\tfirst hit on {}", locked); + let mut dep = dep.clone(); + dep.lock_to(locked); + return dep; + } + } + + // If this dependency did not have a locked version, then we query + // all known locked packages to see if they match this dependency. + // If anything does then we lock it to that and move on. + let v = locked + .get(dep.source_id()) + .and_then(|map| map.get(&*dep.name())) + .and_then(|vec| vec.iter().find(|&&(ref id, _)| dep.matches_id(id))); + if let Some(&(ref id, _)) = v { + trace!("\tsecond hit on {}", id); + let mut dep = dep.clone(); + dep.lock_to(id); + return dep; + } + + // Finally we check to see if any registered patches correspond to + // this dependency. 
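Before the patch lookup that follows, the three-tier fallback of `lock` in miniature (a standalone sketch with types reduced to strings, purely illustrative):

    // Tiered lookup mirroring `lock`: a same-source lock entry first, then any
    // locked package that satisfies the dependency, then registered patches.
    fn lock_dep<'a>(
        same_source: &'a [String],
        all_locked: &'a [String],
        patched: &'a [String],
        wanted: &str,
    ) -> Option<&'a String> {
        same_source.iter().find(|id| id.starts_with(wanted))                 // first hit
            .or_else(|| all_locked.iter().find(|id| id.starts_with(wanted))) // second hit
            .or_else(|| patched.iter().find(|id| id.starts_with(wanted)))    // third hit
    }

    fn main() {
        let locked = vec!["serde 1.0.0".to_string()];
        assert_eq!(lock_dep(&[], &locked, &[], "serde"), Some(&locked[0]));
    }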
+ let v = patches.get(dep.source_id().url()).map(|vec| { + let dep2 = dep.clone(); + let mut iter = vec.iter().filter(move |p| { + dep2.name() == p.name() && dep2.version_req().matches(p.version()) + }); + (iter.next(), iter) + }); + if let Some((Some(patch_id), mut remaining)) = v { + assert!(remaining.next().is_none()); + let patch_source = patch_id.source_id(); + let patch_locked = locked + .get(patch_source) + .and_then(|m| m.get(&*patch_id.name())) + .map(|list| list.iter().any(|&(ref id, _)| id == patch_id)) + .unwrap_or(false); + + if patch_locked { + trace!("\tthird hit on {}", patch_id); + let req = VersionReq::exact(patch_id.version()); + let mut dep = dep.clone(); + dep.set_version_req(req); + return dep; + } + } + + trace!("\tnope, unlocked"); + dep + }) +} diff --git a/src/cargo/core/resolver/conflict_cache.rs b/src/cargo/core/resolver/conflict_cache.rs new file mode 100644 index 000000000..38a0ded78 --- /dev/null +++ b/src/cargo/core/resolver/conflict_cache.rs @@ -0,0 +1,97 @@ +use std::collections::{HashMap, HashSet}; + +use core::{Dependency, PackageId}; +use core::resolver::Context; +use super::types::ConflictReason; + +pub(super) struct ConflictCache { + // `con_from_dep` is a cache of the reasons for each time we + // backtrack. For example after several backtracks we may have: + // + // con_from_dep[`foo = "^1.0.2"`] = vec![ + // map!{`foo=1.0.1`: Semver}, + // map!{`foo=1.0.0`: Semver}, + // ]; + // + // This can be read as "we cannot find a candidate for dep `foo = "^1.0.2"` + // if either `foo=1.0.1` OR `foo=1.0.0` are activated". + // + // Another example after several backtracks we may have: + // + // con_from_dep[`foo = ">=0.8.2, <=0.9.3"`] = vec![ + // map!{`foo=0.8.1`: Semver, `foo=0.9.4`: Semver}, + // ]; + // + // This can be read as "we cannot find a candidate for dep `foo = ">=0.8.2, + // <=0.9.3"` if both `foo=0.8.1` AND `foo=0.9.4` are activated". + // + // This is used to make sure we don't queue work we know will fail. See the + // discussion in https://github.com/rust-lang/cargo/pull/5168 for why this + // is so important, and there can probably be a better data structure here + // but for now this works well enough! + // + // Also, as a final note, this map is *not* ever removed from. This remains + // as a global cache which we never delete from. Any entry in this map is + // unconditionally true regardless of our resolution history of how we got + // here. + con_from_dep: HashMap>>, + // `past_conflict_triggers` is an + // of `past_conflicting_activations`. + // For every `PackageId` this lists the `Dependency`s that mention it in `past_conflicting_activations`. + dep_from_pid: HashMap>, +} + +impl ConflictCache { + pub fn new() -> ConflictCache { + ConflictCache { + con_from_dep: HashMap::new(), + dep_from_pid: HashMap::new(), + } + } + /// Finds any known set of conflicts, if any, + /// which are activated in `cx` and pass the `filter` specified? + pub fn find_conflicting( + &self, + cx: &Context, + dep: &Dependency, + filter: F, + ) -> Option<&HashMap> + where + for<'r> F: FnMut(&'r &HashMap) -> bool, + { + self.con_from_dep + .get(dep)? 
+ .iter() + .filter(filter) + .find(|conflicting| cx.is_conflicting(None, conflicting)) + } + pub fn conflicting( + &self, + cx: &Context, + dep: &Dependency, + ) -> Option<&HashMap> { + self.find_conflicting(cx, dep, |_| true) + } + + /// Add to the cache a conflict of the form: + /// `dep` is known to be unresolvable if + /// all the `PackageId` entries are activated + pub fn insert(&mut self, dep: &Dependency, con: &HashMap) { + let past = self.con_from_dep + .entry(dep.clone()) + .or_insert_with(Vec::new); + if !past.contains(con) { + trace!("{} adding a skip {:?}", dep.name(), con); + past.push(con.clone()); + for c in con.keys() { + self.dep_from_pid + .entry(c.clone()) + .or_insert_with(HashSet::new) + .insert(dep.clone()); + } + } + } + pub fn dependencies_conflicting_with(&self, pid: &PackageId) -> Option<&HashSet> { + self.dep_from_pid.get(pid) + } +} diff --git a/src/cargo/core/resolver/context.rs b/src/cargo/core/resolver/context.rs new file mode 100644 index 000000000..40590b5fd --- /dev/null +++ b/src/cargo/core/resolver/context.rs @@ -0,0 +1,425 @@ +use std::collections::{HashMap, HashSet}; +use std::rc::Rc; + +use core::interning::InternedString; +use core::{Dependency, FeatureValue, PackageId, SourceId, Summary}; +use util::CargoResult; +use util::Graph; + +use super::types::RegistryQueryer; +use super::types::{ActivateResult, ConflictReason, DepInfo, GraphNode, Method, RcList}; + +pub use super::encode::{EncodableDependency, EncodablePackageId, EncodableResolve}; +pub use super::encode::{Metadata, WorkspaceResolve}; +pub use super::resolve::Resolve; + +// A `Context` is basically a bunch of local resolution information which is +// kept around for all `BacktrackFrame` instances. As a result, this runs the +// risk of being cloned *a lot* so we want to make this as cheap to clone as +// possible. +#[derive(Clone)] +pub struct Context { + // TODO: Both this and the two maps below are super expensive to clone. We should + // switch to persistent hash maps if we can at some point or otherwise + // make these much cheaper to clone in general. + pub activations: Activations, + pub resolve_features: HashMap>>, + pub links: HashMap, + + // These are two cheaply-cloneable lists (O(1) clone) which are effectively + // hash maps but are built up as "construction lists". We'll iterate these + // at the very end and actually construct the map that we're making. + pub resolve_graph: RcList, + pub resolve_replacements: RcList<(PackageId, PackageId)>, + + // These warnings are printed after resolution. + pub warnings: RcList, +} + +pub type Activations = HashMap<(InternedString, SourceId), Rc>>; + +impl Context { + pub fn new() -> Context { + Context { + resolve_graph: RcList::new(), + resolve_features: HashMap::new(), + links: HashMap::new(), + resolve_replacements: RcList::new(), + activations: HashMap::new(), + warnings: RcList::new(), + } + } + + /// Activate this summary by inserting it into our list of known activations. + /// + /// Returns true if this summary with the given method is already activated. 
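+    ///
+    /// A sketch of the call pattern used by `activate` in `mod.rs`
+    /// (illustrative only, hence `ignore`):
+    ///
+    /// ```ignore
+    /// let already_activated = cx.flag_activated(&candidate.summary, &method)?;
+    /// if already_activated {
+    ///     return Ok(None); // nothing new to do for this candidate
+    /// }
+    /// ```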
+ pub fn flag_activated(&mut self, summary: &Summary, method: &Method) -> CargoResult { + let id = summary.package_id(); + let prev = self.activations + .entry((id.name(), id.source_id().clone())) + .or_insert_with(|| Rc::new(Vec::new())); + if !prev.iter().any(|c| c == summary) { + self.resolve_graph.push(GraphNode::Add(id.clone())); + if let Some(link) = summary.links() { + ensure!( + self.links.insert(link, id.clone()).is_none(), + "Attempting to resolve a with more then one crate with the links={}. \n\ + This will not build as is. Consider rebuilding the .lock file.", + &*link + ); + } + Rc::make_mut(prev).push(summary.clone()); + return Ok(false); + } + debug!("checking if {} is already activated", summary.package_id()); + let (features, use_default) = match *method { + Method::Everything + | Method::Required { + all_features: true, .. + } => return Ok(false), + Method::Required { + features, + uses_default_features, + .. + } => (features, uses_default_features), + }; + + let has_default_feature = summary.features().contains_key("default"); + Ok(match self.resolve_features.get(id) { + Some(prev) => { + features + .iter() + .all(|f| prev.contains(&InternedString::new(f))) + && (!use_default || prev.contains(&InternedString::new("default")) + || !has_default_feature) + } + None => features.is_empty() && (!use_default || !has_default_feature), + }) + } + + pub fn build_deps( + &mut self, + registry: &mut RegistryQueryer, + parent: Option<&Summary>, + candidate: &Summary, + method: &Method, + ) -> ActivateResult> { + // First, figure out our set of dependencies based on the requested set + // of features. This also calculates what features we're going to enable + // for our own dependencies. + let deps = self.resolve_features(parent, candidate, method)?; + + // Next, transform all dependencies into a list of possible candidates + // which can satisfy that dependency. + let mut deps = deps.into_iter() + .map(|(dep, features)| { + let candidates = registry.query(&dep)?; + Ok((dep, candidates, Rc::new(features))) + }) + .collect::>>()?; + + // Attempt to resolve dependencies with fewer candidates before trying + // dependencies with more candidates. This way if the dependency with + // only one candidate can't be resolved we don't have to do a bunch of + // work before we figure that out. + deps.sort_by_key(|&(_, ref a, _)| a.len()); + + Ok(deps) + } + + pub fn prev_active(&self, dep: &Dependency) -> &[Summary] { + self.activations + .get(&(dep.name(), dep.source_id().clone())) + .map(|v| &v[..]) + .unwrap_or(&[]) + } + + fn is_active(&self, id: &PackageId) -> bool { + self.activations + .get(&(id.name(), id.source_id().clone())) + .map(|v| v.iter().any(|s| s.package_id() == id)) + .unwrap_or(false) + } + + /// checks whether all of `parent` and the keys of `conflicting activations` + /// are still active + pub fn is_conflicting( + &self, + parent: Option<&PackageId>, + conflicting_activations: &HashMap, + ) -> bool { + conflicting_activations + .keys() + .chain(parent) + .all(|id| self.is_active(id)) + } + + /// Return all dependencies and the features we want from them. + fn resolve_features<'b>( + &mut self, + parent: Option<&Summary>, + s: &'b Summary, + method: &'b Method, + ) -> ActivateResult)>> { + let dev_deps = match *method { + Method::Everything => true, + Method::Required { dev_deps, .. 
} => dev_deps, + }; + + // First, filter by dev-dependencies + let deps = s.dependencies(); + let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps); + + // Requested features stored in the Method are stored as string references, but we want to + // transform them into FeatureValues here. In order to pass the borrow checker with + // storage of the FeatureValues that outlives the Requirements object, we do the + // transformation here, and pass the FeatureValues to build_requirements(). + let values = if let Method::Required { + all_features: false, + features: requested, + .. + } = *method + { + requested + .iter() + .map(|&f| FeatureValue::new(f, s)) + .collect::>() + } else { + vec![] + }; + let reqs = build_requirements(s, method, &values)?; + let mut ret = Vec::new(); + let mut used_features = HashSet::new(); + let default_dep = (false, Vec::new()); + + // Next, collect all actually enabled dependencies and their features. + for dep in deps { + // Skip optional dependencies, but not those enabled through a + // feature + if dep.is_optional() && !reqs.deps.contains_key(&*dep.name()) { + continue; + } + // So we want this dependency. Move the features we want from + // `feature_deps` to `ret` and register ourselves as using this + // name. + let base = reqs.deps.get(&*dep.name()).unwrap_or(&default_dep); + used_features.insert(dep.name().as_str()); + let always_required = !dep.is_optional() + && !s.dependencies() + .iter() + .any(|d| d.is_optional() && d.name() == dep.name()); + if always_required && base.0 { + self.warnings.push(format!( + "Package `{}` does not have feature `{}`. It has a required dependency \ + with that name, but only optional dependencies can be used as features. \ + This is currently a warning to ease the transition, but it will become an \ + error in the future.", + s.package_id(), + dep.name() + )); + } + let mut base = base.1.clone(); + base.extend(dep.features().iter()); + for feature in base.iter() { + if feature.contains('/') { + return Err( + format_err!("feature names may not contain slashes: `{}`", feature).into(), + ); + } + } + ret.push((dep.clone(), base)); + } + + // Any entries in `reqs.dep` which weren't used are bugs in that the + // package does not actually have those dependencies. We classified + // them as dependencies in the first place because there is no such + // feature, either. + let remaining = reqs.deps.keys() + .cloned() + .filter(|s| !used_features.contains(s)) + .collect::>(); + if !remaining.is_empty() { + let features = remaining.join(", "); + return Err(match parent { + None => format_err!( + "Package `{}` does not have these features: `{}`", + s.package_id(), + features + ).into(), + Some(p) => ( + p.package_id().clone(), + ConflictReason::MissingFeatures(features), + ).into(), + }); + } + + // Record what list of features is active for this package. 
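+        // (The feature set is kept behind an `Rc` so that cloning the whole
+        // `Context` for a backtrack frame stays cheap; `Rc::make_mut` below
+        // only copies the set when we actually need to mutate it.)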
+ if !reqs.used.is_empty() { + let pkgid = s.package_id(); + + let set = Rc::make_mut( + self.resolve_features + .entry(pkgid.clone()) + .or_insert_with(|| Rc::new(HashSet::new())), + ); + + for feature in reqs.used { + set.insert(InternedString::new(feature)); + } + } + + Ok(ret) + } + + pub fn resolve_replacements(&self) -> HashMap { + let mut replacements = HashMap::new(); + let mut cur = &self.resolve_replacements; + while let Some(ref node) = cur.head { + let (k, v) = node.0.clone(); + replacements.insert(k, v); + cur = &node.1; + } + replacements + } + + pub fn graph(&self) -> Graph> { + let mut graph: Graph> = Graph::new(); + let mut cur = &self.resolve_graph; + while let Some(ref node) = cur.head { + match node.0 { + GraphNode::Add(ref p) => graph.add(p.clone()), + GraphNode::Link(ref a, ref b, ref dep) => { + graph.link(a.clone(), b.clone()).push(dep.clone()); + } + } + cur = &node.1; + } + graph + } +} + +/// Takes requested features for a single package from the input Method and +/// recurses to find all requested features, dependencies and requested +/// dependency features in a Requirements object, returning it to the resolver. +fn build_requirements<'a, 'b: 'a>( + s: &'a Summary, + method: &'b Method, + requested: &'a [FeatureValue], +) -> CargoResult> { + let mut reqs = Requirements::new(s); + for fv in requested.iter() { + reqs.require_value(fv)?; + } + match *method { + Method::Everything + | Method::Required { + all_features: true, .. + } => { + for key in s.features().keys() { + reqs.require_feature(key)?; + } + for dep in s.dependencies().iter().filter(|d| d.is_optional()) { + reqs.require_dependency(dep.name().as_str()); + } + } + _ => {} // Explicitly requested features are handled through `requested` + } + match *method { + Method::Everything + | Method::Required { + uses_default_features: true, + .. + } => { + if s.features().get("default").is_some() { + reqs.require_feature("default")?; + } + } + Method::Required { + uses_default_features: false, + .. + } => {} + } + Ok(reqs) +} + +struct Requirements<'a> { + summary: &'a Summary, + // The deps map is a mapping of package name to list of features enabled. + // Each package should be enabled, and each package should have the + // specified set of features enabled. The boolean indicates whether this + // package was specifically requested (rather than just requesting features + // *within* this package). + deps: HashMap<&'a str, (bool, Vec)>, + // The used features set is the set of features which this local package had + // enabled, which is later used when compiling to instruct the code what + // features were enabled. 
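+    // (The companion `visited` set below guards `require_feature`'s
+    // recursion so that each feature is expanded at most once.)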
+ used: HashSet<&'a str>, + visited: HashSet<&'a str>, +} + +impl<'r> Requirements<'r> { + fn new(summary: &Summary) -> Requirements { + Requirements { + summary, + deps: HashMap::new(), + used: HashSet::new(), + visited: HashSet::new(), + } + } + + fn require_crate_feature(&mut self, package: &'r str, feat: InternedString) { + self.used.insert(package); + self.deps + .entry(package) + .or_insert((false, Vec::new())) + .1 + .push(feat); + } + + fn seen(&mut self, feat: &'r str) -> bool { + if self.visited.insert(feat) { + self.used.insert(feat); + false + } else { + true + } + } + + fn require_dependency(&mut self, pkg: &'r str) { + if self.seen(pkg) { + return; + } + self.deps.entry(pkg).or_insert((false, Vec::new())).0 = true; + } + + fn require_feature(&mut self, feat: &'r str) -> CargoResult<()> { + if feat.is_empty() || self.seen(feat) { + return Ok(()); + } + for fv in self.summary + .features() + .get(feat) + .expect("must be a valid feature") + { + match *fv { + FeatureValue::Feature(ref dep_feat) if **dep_feat == *feat => bail!( + "Cyclic feature dependency: feature `{}` depends on itself", + feat + ), + _ => {} + } + self.require_value(fv)?; + } + Ok(()) + } + + fn require_value(&mut self, fv: &'r FeatureValue) -> CargoResult<()> { + match *fv { + FeatureValue::Feature(ref feat) => self.require_feature(feat), + FeatureValue::Crate(ref dep) => Ok(self.require_dependency(dep)), + FeatureValue::CrateFeature(ref dep, dep_feat) => { + Ok(self.require_crate_feature(dep, dep_feat)) + } + } + } +} diff --git a/src/cargo/core/resolver/encode.rs b/src/cargo/core/resolver/encode.rs new file mode 100644 index 000000000..36880c1b8 --- /dev/null +++ b/src/cargo/core/resolver/encode.rs @@ -0,0 +1,431 @@ +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::fmt; +use std::str::FromStr; + +use serde::ser; +use serde::de; + +use core::{Dependency, Package, PackageId, SourceId, Workspace}; +use util::{internal, Graph}; +use util::errors::{CargoError, CargoResult, CargoResultExt}; + +use super::Resolve; + +#[derive(Serialize, Deserialize, Debug)] +pub struct EncodableResolve { + package: Option>, + /// `root` is optional to allow backward compatibility. + root: Option, + metadata: Option, + + #[serde(default, skip_serializing_if = "Patch::is_empty")] + patch: Patch, +} + +#[derive(Serialize, Deserialize, Debug, Default)] +struct Patch { + unused: Vec, +} + +pub type Metadata = BTreeMap; + +impl EncodableResolve { + pub fn into_resolve(self, ws: &Workspace) -> CargoResult { + let path_deps = build_path_deps(ws); + + let packages = { + let mut packages = self.package.unwrap_or_default(); + if let Some(root) = self.root { + packages.insert(0, root); + } + packages + }; + + // `PackageId`s in the lock file don't include the `source` part + // for workspace members, so we reconstruct proper ids. + let (live_pkgs, all_pkgs) = { + let mut live_pkgs = HashMap::new(); + let mut all_pkgs = HashSet::new(); + for pkg in packages.iter() { + let enc_id = EncodablePackageId { + name: pkg.name.clone(), + version: pkg.version.clone(), + source: pkg.source.clone(), + }; + + if !all_pkgs.insert(enc_id.clone()) { + return Err(internal(format!( + "package `{}` is specified twice in the lockfile", + pkg.name + ))); + } + let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) { + // We failed to find a local package in the workspace. + // It must have been removed and should be ignored. 
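+                // (E.g. `Cargo.lock` still names a path dependency that was
+                // deleted from the workspace; such stale entries are dropped
+                // rather than treated as errors.)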
+ None => { + debug!("path dependency now missing {} v{}", pkg.name, pkg.version); + continue; + } + Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?, + }; + + assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none()) + } + (live_pkgs, all_pkgs) + }; + + let lookup_id = |enc_id: &EncodablePackageId| -> CargoResult> { + match live_pkgs.get(enc_id) { + Some(&(ref id, _)) => Ok(Some(id.clone())), + None => if all_pkgs.contains(enc_id) { + // Package is found in the lockfile, but it is + // no longer a member of the workspace. + Ok(None) + } else { + Err(internal(format!( + "package `{}` is specified as a dependency, \ + but is missing from the package list", + enc_id + ))) + }, + } + }; + + let g = { + let mut g = Graph::new(); + + for &(ref id, _) in live_pkgs.values() { + g.add(id.clone()); + } + + for &(ref id, pkg) in live_pkgs.values() { + let deps = match pkg.dependencies { + Some(ref deps) => deps, + None => continue, + }; + + for edge in deps.iter() { + if let Some(to_depend_on) = lookup_id(edge)? { + g.link(id.clone(), to_depend_on); + } + } + } + g + }; + + let replacements = { + let mut replacements = HashMap::new(); + for &(ref id, pkg) in live_pkgs.values() { + if let Some(ref replace) = pkg.replace { + assert!(pkg.dependencies.is_none()); + if let Some(replace_id) = lookup_id(replace)? { + replacements.insert(id.clone(), replace_id); + } + } + } + replacements + }; + + let mut metadata = self.metadata.unwrap_or_default(); + + // Parse out all package checksums. After we do this we can be in a few + // situations: + // + // * We parsed no checksums. In this situation we're dealing with an old + // lock file and we're gonna fill them all in. + // * We parsed some checksums, but not one for all packages listed. It + // could have been the case that some were listed, then an older Cargo + // client added more dependencies, and now we're going to fill in the + // missing ones. + // * There are too many checksums listed, indicative of an older Cargo + // client removing a package but not updating the checksums listed. + // + // In all of these situations they're part of normal usage, so we don't + // really worry about it. We just try to slurp up as many checksums as + // possible. + let mut checksums = HashMap::new(); + let prefix = "checksum "; + let mut to_remove = Vec::new(); + for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) { + to_remove.push(k.to_string()); + let k = &k[prefix.len()..]; + let enc_id: EncodablePackageId = k.parse() + .chain_err(|| internal("invalid encoding of checksum in lockfile"))?; + let id = match lookup_id(&enc_id) { + Ok(Some(id)) => id, + _ => continue, + }; + + let v = if v == "" { + None + } else { + Some(v.to_string()) + }; + checksums.insert(id, v); + } + + for k in to_remove { + metadata.remove(&k); + } + + let mut unused_patches = Vec::new(); + for pkg in self.patch.unused { + let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) { + Some(src) => PackageId::new(&pkg.name, &pkg.version, src)?, + None => continue, + }; + unused_patches.push(id); + } + + Ok(Resolve::new( + g, + replacements, + HashMap::new(), + checksums, + metadata, + unused_patches, + )) + } +} + +fn build_path_deps(ws: &Workspace) -> HashMap { + // If a crate is *not* a path source, then we're probably in a situation + // such as `cargo install` with a lock file from a remote dependency. In + // that case we don't need to fixup any path dependencies (as they're not + // actually path dependencies any more), so we ignore them. 
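+    // Seed the map with the path-based workspace members themselves, then
+    // walk their dependencies (and any `[patch]`/`[replace]` entries)
+    // transitively via `build_pkg`/`build_dep` below.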
+ let members = ws.members() + .filter(|p| p.package_id().source_id().is_path()) + .collect::>(); + + let mut ret = HashMap::new(); + let mut visited = HashSet::new(); + for member in members.iter() { + ret.insert( + member.package_id().name().to_string(), + member.package_id().source_id().clone(), + ); + visited.insert(member.package_id().source_id().clone()); + } + for member in members.iter() { + build_pkg(member, ws, &mut ret, &mut visited); + } + for deps in ws.root_patch().values() { + for dep in deps { + build_dep(dep, ws, &mut ret, &mut visited); + } + } + for &(_, ref dep) in ws.root_replace() { + build_dep(dep, ws, &mut ret, &mut visited); + } + + return ret; + + fn build_pkg( + pkg: &Package, + ws: &Workspace, + ret: &mut HashMap, + visited: &mut HashSet, + ) { + for dep in pkg.dependencies() { + build_dep(dep, ws, ret, visited); + } + } + + fn build_dep( + dep: &Dependency, + ws: &Workspace, + ret: &mut HashMap, + visited: &mut HashSet, + ) { + let id = dep.source_id(); + if visited.contains(id) || !id.is_path() { + return; + } + let path = match id.url().to_file_path() { + Ok(p) => p.join("Cargo.toml"), + Err(_) => return, + }; + let pkg = match ws.load(&path) { + Ok(p) => p, + Err(_) => return, + }; + ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone()); + visited.insert(pkg.package_id().source_id().clone()); + build_pkg(&pkg, ws, ret, visited); + } +} + +impl Patch { + fn is_empty(&self) -> bool { + self.unused.is_empty() + } +} + +#[derive(Serialize, Deserialize, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct EncodableDependency { + name: String, + version: String, + source: Option, + dependencies: Option>, + replace: Option, +} + +#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)] +pub struct EncodablePackageId { + name: String, + version: String, + source: Option, +} + +impl fmt::Display for EncodablePackageId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", self.name, self.version)?; + if let Some(ref s) = self.source { + write!(f, " ({})", s.to_url())?; + } + Ok(()) + } +} + +impl FromStr for EncodablePackageId { + type Err = CargoError; + + fn from_str(s: &str) -> CargoResult { + let mut s = s.splitn(3, ' '); + let name = s.next().unwrap(); + let version = s.next() + .ok_or_else(|| internal("invalid serialized PackageId"))?; + let source_id = match s.next() { + Some(s) => { + if s.starts_with('(') && s.ends_with(')') { + Some(SourceId::from_url(&s[1..s.len() - 1])?) 
+ } else { + bail!("invalid serialized PackageId") + } + } + None => None, + }; + + Ok(EncodablePackageId { + name: name.to_string(), + version: version.to_string(), + source: source_id, + }) + } +} + +impl ser::Serialize for EncodablePackageId { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + s.collect_str(self) + } +} + +impl<'de> de::Deserialize<'de> for EncodablePackageId { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + String::deserialize(d).and_then(|string| { + string + .parse::() + .map_err(de::Error::custom) + }) + } +} + +pub struct WorkspaceResolve<'a, 'cfg: 'a> { + pub ws: &'a Workspace<'cfg>, + pub resolve: &'a Resolve, +} + +impl<'a, 'cfg> ser::Serialize for WorkspaceResolve<'a, 'cfg> { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + let mut ids: Vec<_> = self.resolve.iter().collect(); + ids.sort(); + + let encodable = ids.iter() + .filter_map(|&id| Some(encodable_resolve_node(id, self.resolve))) + .collect::>(); + + let mut metadata = self.resolve.metadata().clone(); + + for id in ids.iter().filter(|id| !id.source_id().is_path()) { + let checksum = match self.resolve.checksums()[*id] { + Some(ref s) => &s[..], + None => "", + }; + let id = encodable_package_id(id); + metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string()); + } + + let metadata = if metadata.is_empty() { + None + } else { + Some(metadata) + }; + + let patch = Patch { + unused: self.resolve + .unused_patches() + .iter() + .map(|id| EncodableDependency { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()), + dependencies: None, + replace: None, + }) + .collect(), + }; + EncodableResolve { + package: Some(encodable), + root: None, + metadata, + patch, + }.serialize(s) + } +} + +fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency { + let (replace, deps) = match resolve.replacement(id) { + Some(id) => (Some(encodable_package_id(id)), None), + None => { + let mut deps = resolve + .deps_not_replaced(id) + .map(encodable_package_id) + .collect::>(); + deps.sort(); + (None, Some(deps)) + } + }; + + EncodableDependency { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()), + dependencies: deps, + replace, + } +} + +fn encodable_package_id(id: &PackageId) -> EncodablePackageId { + EncodablePackageId { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()).map(|s| s.with_precise(None)), + } +} + +fn encode_source(id: &SourceId) -> Option { + if id.is_path() { + None + } else { + Some(id.clone()) + } +} diff --git a/src/cargo/core/resolver/mod.rs b/src/cargo/core/resolver/mod.rs new file mode 100644 index 000000000..e153daf08 --- /dev/null +++ b/src/cargo/core/resolver/mod.rs @@ -0,0 +1,1100 @@ +//! Resolution of the entire dependency graph for a crate +//! +//! This module implements the core logic in taking the world of crates and +//! constraints and creating a resolved graph with locked versions for all +//! crates and their dependencies. This is separate from the registry module +//! which is more worried about discovering crates from various sources, this +//! module just uses the Registry trait as a source to learn about crates from. +//! +//! Actually solving a constraint graph is an NP-hard problem. This algorithm +//! is basically a nice heuristic to make sure we get roughly the best answer +//! most of the time. 
The constraints that we're working with are:
+//!
+//! 1. Each crate can have any number of dependencies. Each dependency can
+//!    declare a version range that it is compatible with.
+//! 2. Crates can be activated with multiple versions (e.g. show up in the
+//!    dependency graph twice) so long as each pairwise instance has
+//!    semver-incompatible versions.
+//!
+//! The algorithm employed here is fairly simple: we do a DFS, activating the
+//! "newest crate" (highest version) first and then going to the next
+//! option. The heuristics we employ are:
+//!
+//! * Never try to activate a crate version which is incompatible. This means we
+//!   only try crates which will actually satisfy a dependency and we won't ever
+//!   try to activate a crate that's semver compatible with something else
+//!   activated (as we're only allowed to have one) nor try to activate a crate
+//!   that has the same links attribute as something else activated.
+//! * Always try to activate the highest version crate first. The default
+//!   dependency in Cargo (e.g. when you write `foo = "0.1.2"`) is
+//!   semver-compatible, so selecting the highest version possible will allow us
+//!   to hopefully satisfy as many dependencies at once as possible.
+//!
+//! Beyond that, what's implemented below is just a naive backtracking version
+//! which should in theory try all possible combinations of dependencies and
+//! versions to see if one works. The first resolution that works causes
+//! everything to bail out immediately and return success, and only if *nothing*
+//! works do we actually return an error up the stack.
+//!
+//! ## Performance
+//!
+//! Note that this is a relatively performance-critical portion of Cargo. The
+//! data that we're processing is proportional to the size of the dependency
+//! graph, which can often be quite large (e.g. take a look at Servo). To make
+//! matters worse the DFS algorithm we've implemented is inherently quite
+//! inefficient. When we add the requirement of backtracking on top it means
+//! that we're implementing something that probably shouldn't be allocating all
+//! over the place.
+
+use std::collections::{BTreeMap, BinaryHeap, HashMap, HashSet};
+use std::mem;
+use std::rc::Rc;
+use std::time::{Duration, Instant};
+
+use semver;
+
+use core::{Dependency, PackageId, Registry, Summary};
+use core::PackageIdSpec;
+use core::interning::InternedString;
+use util::config::Config;
+use util::errors::{CargoError, CargoResult};
+use util::profile;
+
+use self::context::{Activations, Context};
+use self::types::{ActivateError, ActivateResult, Candidate, ConflictReason, DepsFrame, GraphNode};
+use self::types::{RcVecIter, RegistryQueryer};
+
+pub use self::encode::{EncodableDependency, EncodablePackageId, EncodableResolve};
+pub use self::encode::{Metadata, WorkspaceResolve};
+pub use self::resolve::{Deps, DepsNotReplaced, Resolve};
+pub use self::types::Method;
+
+mod context;
+mod encode;
+mod conflict_cache;
+mod resolve;
+mod types;
+
+/// Builds the list of all packages required to build the first argument.
+///
+/// * `summaries` - the list of package summaries along with how to resolve
+///   their features. This is a list of all top-level packages that are intended
+///   to be part of the lock file (resolve output). These typically are a list
+///   of all workspace members.
+///
+/// * `replacements` - this is a list of `[replace]` directives found in the
+///   root of the workspace. The list here is a `PackageIdSpec` of what to
+///   replace and a `Dependency` to replace that with.
In general it's not +/// recommended to use `[replace]` any more and use `[patch]` instead, which +/// is supported elsewhere. +/// +/// * `registry` - this is the source from which all package summaries are +/// loaded. It's expected that this is extensively configured ahead of time +/// and is idempotent with our requests to it (aka returns the same results +/// for the same query every time). Typically this is an instance of a +/// `PackageRegistry`. +/// +/// * `try_to_use` - this is a list of package ids which were previously found +/// in the lock file. We heuristically prefer the ids listed in `try_to_use` +/// when sorting candidates to activate, but otherwise this isn't used +/// anywhere else. +/// +/// * `config` - a location to print warnings and such, or `None` if no warnings +/// should be printed +/// +/// * `print_warnings` - whether or not to print backwards-compatibility +/// warnings and such +pub fn resolve( + summaries: &[(Summary, Method)], + replacements: &[(PackageIdSpec, Dependency)], + registry: &mut Registry, + try_to_use: &HashSet<&PackageId>, + config: Option<&Config>, + print_warnings: bool, +) -> CargoResult { + let cx = Context::new(); + let _p = profile::start("resolving"); + let minimal_versions = match config { + Some(config) => config.cli_unstable().minimal_versions, + None => false, + }; + let mut registry = RegistryQueryer::new(registry, replacements, try_to_use, minimal_versions); + let cx = activate_deps_loop(cx, &mut registry, summaries, config)?; + + let mut cksums = HashMap::new(); + for summary in cx.activations.values().flat_map(|v| v.iter()) { + let cksum = summary.checksum().map(|s| s.to_string()); + cksums.insert(summary.package_id().clone(), cksum); + } + let resolve = Resolve::new( + cx.graph(), + cx.resolve_replacements(), + cx.resolve_features + .iter() + .map(|(k, v)| (k.clone(), v.iter().map(|x| x.to_string()).collect())) + .collect(), + cksums, + BTreeMap::new(), + Vec::new(), + ); + + check_cycles(&resolve, &cx.activations)?; + trace!("resolved: {:?}", resolve); + + // If we have a shell, emit warnings about required deps used as feature. + if let Some(config) = config { + if print_warnings { + let mut shell = config.shell(); + let mut warnings = &cx.warnings; + while let Some(ref head) = warnings.head { + shell.warn(&head.0)?; + warnings = &head.1; + } + } + } + + Ok(resolve) +} + +/// Recursively activates the dependencies for `top`, in depth-first order, +/// backtracking across possible candidates for each dependency as necessary. +/// +/// If all dependencies can be activated and resolved to a version in the +/// dependency graph, cx.resolve is returned. +fn activate_deps_loop( + mut cx: Context, + registry: &mut RegistryQueryer, + summaries: &[(Summary, Method)], + config: Option<&Config>, +) -> CargoResult { + // Note that a `BinaryHeap` is used for the remaining dependencies that need + // activation. This heap is sorted such that the "largest value" is the most + // constrained dependency, or the one with the least candidates. + // + // This helps us get through super constrained portions of the dependency + // graph quickly and hopefully lock down what later larger dependencies can + // use (those with more candidates). + let mut backtrack_stack = Vec::new(); + let mut remaining_deps = BinaryHeap::new(); + + // `past_conflicting_activations` is a cache of the reasons for each time we + // backtrack. 
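+    // (See `conflict_cache.rs`: each entry reads as "this dep is unresolvable
+    // while all of these packages are activated", and the cache is append-only,
+    // so a hit stays valid regardless of how we reached the current state.)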
+ let mut past_conflicting_activations = conflict_cache::ConflictCache::new(); + + // Activate all the initial summaries to kick off some work. + for &(ref summary, ref method) in summaries { + debug!("initial activation: {}", summary.package_id()); + let candidate = Candidate { + summary: summary.clone(), + replace: None, + }; + let res = activate(&mut cx, registry, None, candidate, method); + match res { + Ok(Some((frame, _))) => remaining_deps.push(frame), + Ok(None) => (), + Err(ActivateError::Fatal(e)) => return Err(e), + Err(ActivateError::Conflict(_, _)) => panic!("bad error from activate"), + } + } + + let mut ticks = 0; + let start = Instant::now(); + let time_to_print = Duration::from_millis(500); + let mut printed = false; + let mut deps_time = Duration::new(0, 0); + + // Main resolution loop, this is the workhorse of the resolution algorithm. + // + // You'll note that a few stacks are maintained on the side, which might + // seem odd when this algorithm looks like it could be implemented + // recursively. While correct, this is implemented iteratively to avoid + // blowing the stack (the recursion depth is proportional to the size of the + // input). + // + // The general sketch of this loop is to run until there are no dependencies + // left to activate, and for each dependency to attempt to activate all of + // its own dependencies in turn. The `backtrack_stack` is a side table of + // backtracking states where if we hit an error we can return to in order to + // attempt to continue resolving. + while let Some(mut deps_frame) = remaining_deps.pop() { + // If we spend a lot of time here (we shouldn't in most cases) then give + // a bit of a visual indicator as to what we're doing. Only enable this + // when stderr is a tty (a human is likely to be watching) to ensure we + // get deterministic output otherwise when observed by tools. + // + // Also note that we hit this loop a lot, so it's fairly performance + // sensitive. As a result try to defer a possibly expensive operation + // like `Instant::now` by only checking every N iterations of this loop + // to amortize the cost of the current time lookup. + ticks += 1; + if let Some(config) = config { + if config.shell().is_err_tty() && !printed && ticks % 1000 == 0 + && start.elapsed() - deps_time > time_to_print + { + printed = true; + config.shell().status("Resolving", "dependency graph...")?; + } + } + + let just_here_for_the_error_messages = deps_frame.just_for_error_messages; + + // Figure out what our next dependency to activate is, and if nothing is + // listed then we're entirely done with this frame (yay!) and we can + // move on to the next frame. + let frame = match deps_frame.remaining_siblings.next() { + Some(sibling) => { + let parent = Summary::clone(&deps_frame.parent); + remaining_deps.push(deps_frame); + (parent, sibling) + } + None => continue, + }; + let (mut parent, (mut cur, (mut dep, candidates, mut features))) = frame; + assert!(!remaining_deps.is_empty()); + + trace!( + "{}[{}]>{} {} candidates", + parent.name(), + cur, + dep.name(), + candidates.len() + ); + trace!( + "{}[{}]>{} {} prev activations", + parent.name(), + cur, + dep.name(), + cx.prev_active(&dep).len() + ); + + let just_here_for_the_error_messages = just_here_for_the_error_messages + && past_conflicting_activations + .conflicting(&cx, &dep) + .is_some(); + + let mut remaining_candidates = RemainingCandidates::new(&candidates); + + // `conflicting_activations` stores all the reasons we were unable to + // activate candidates. 
One of these reasons will have to go away for + // backtracking to find a place to restart. It is also the list of + // things to explain in the error message if we fail to resolve. + // + // This is a map of package id to a reason why that packaged caused a + // conflict for us. + let mut conflicting_activations = HashMap::new(); + + // When backtracking we don't fully update `conflicting_activations` + // especially for the cases that we didn't make a backtrack frame in the + // first place. This `backtracked` var stores whether we are continuing + // from a restored backtrack frame so that we can skip caching + // `conflicting_activations` in `past_conflicting_activations` + let mut backtracked = false; + + loop { + let next = remaining_candidates.next(&cx, &dep); + + let (candidate, has_another) = next.or_else(|conflicting| { + // If we get here then our `remaining_candidates` was just + // exhausted, so `dep` failed to activate. + // + // It's our job here to backtrack, if possible, and find a + // different candidate to activate. If we can't find any + // candidates whatsoever then it's time to bail entirely. + trace!("{}[{}]>{} -- no candidates", parent.name(), cur, dep.name()); + + // Add all the reasons to our frame's list of conflicting + // activations, as we may use this to start backtracking later. + conflicting_activations.extend(conflicting); + + // Use our list of `conflicting_activations` to add to our + // global list of past conflicting activations, effectively + // globally poisoning `dep` if `conflicting_activations` ever + // shows up again. We'll use the `past_conflicting_activations` + // below to determine if a dependency is poisoned and skip as + // much work as possible. + // + // If we're only here for the error messages then there's no + // need to try this as this dependency is already known to be + // bad. + // + // As we mentioned above with the `backtracked` variable if this + // local is set to `true` then our `conflicting_activations` may + // not be right, so we can't push into our global cache. + if !just_here_for_the_error_messages && !backtracked { + past_conflicting_activations.insert(&dep, &conflicting_activations); + } + + match find_candidate(&mut backtrack_stack, &parent, &conflicting_activations) { + Some((candidate, has_another, frame)) => { + // Reset all of our local variables used with the + // contents of `frame` to complete our backtrack. + cur = frame.cur; + cx = frame.context_backup; + remaining_deps = frame.deps_backup; + remaining_candidates = frame.remaining_candidates; + parent = frame.parent; + dep = frame.dep; + features = frame.features; + conflicting_activations = frame.conflicting_activations; + backtracked = true; + Ok((candidate, has_another)) + } + None => { + debug!("no candidates found"); + Err(activation_error( + &cx, + registry.registry, + &parent, + &dep, + &conflicting_activations, + &candidates, + config, + )) + } + } + })?; + + // If we're only here for the error messages then we know that this + // activation will fail one way or another. To that end if we've got + // more candidates we want to fast-forward to the last one as + // otherwise we'll just backtrack here anyway (helping us to skip + // some work). + if just_here_for_the_error_messages && !backtracked && has_another { + continue; + } + + // We have a `candidate`. Create a `BacktrackFrame` so we can add it + // to the `backtrack_stack` later if activation succeeds. 
+ // + // Note that if we don't actually have another candidate then there + // will be nothing to backtrack to so we skip construction of the + // frame. This is a relatively important optimization as a number of + // the `clone` calls below can be quite expensive, so we avoid them + // if we can. + let backtrack = if has_another { + Some(BacktrackFrame { + cur, + context_backup: Context::clone(&cx), + deps_backup: >::clone(&remaining_deps), + remaining_candidates: remaining_candidates.clone(), + parent: Summary::clone(&parent), + dep: Dependency::clone(&dep), + features: Rc::clone(&features), + conflicting_activations: conflicting_activations.clone(), + }) + } else { + None + }; + + let pid = candidate.summary.package_id().clone(); + let method = Method::Required { + dev_deps: false, + features: &features, + all_features: false, + uses_default_features: dep.uses_default_features(), + }; + trace!( + "{}[{}]>{} trying {}", + parent.name(), + cur, + dep.name(), + candidate.summary.version() + ); + let res = activate( + &mut cx, + registry, + Some((&parent, &dep)), + candidate, + &method, + ); + + let successfully_activated = match res { + // Success! We've now activated our `candidate` in our context + // and we're almost ready to move on. We may want to scrap this + // frame in the end if it looks like it's not going to end well, + // so figure that out here. + Ok(Some((mut frame, dur))) => { + deps_time += dur; + + // Our `frame` here is a new package with its own list of + // dependencies. Do a sanity check here of all those + // dependencies by cross-referencing our global + // `past_conflicting_activations`. Recall that map is a + // global cache which lists sets of packages where, when + // activated, the dependency is unresolvable. + // + // If any our our frame's dependencies fit in that bucket, + // aka known unresolvable, then we extend our own set of + // conflicting activations with theirs. We can do this + // because the set of conflicts we found implies the + // dependency can't be activated which implies that we + // ourselves can't be activated, so we know that they + // conflict with us. + let mut has_past_conflicting_dep = just_here_for_the_error_messages; + if !has_past_conflicting_dep { + if let Some(conflicting) = frame + .remaining_siblings + .clone() + .filter_map(|(_, (ref new_dep, _, _))| { + past_conflicting_activations.conflicting(&cx, new_dep) + }) + .next() + { + // If one of our deps is known unresolvable + // then we will not succeed. + // How ever if we are part of the reason that + // one of our deps conflicts then + // we can make a stronger statement + // because we will definitely be activated when + // we try our dep. + conflicting_activations.extend( + conflicting + .iter() + .filter(|&(p, _)| p != &pid) + .map(|(p, r)| (p.clone(), r.clone())), + ); + + has_past_conflicting_dep = true; + } + } + // If any of `remaining_deps` are known unresolvable with + // us activated, then we extend our own set of + // conflicting activations with theirs and its parent. We can do this + // because the set of conflicts we found implies the + // dependency can't be activated which implies that we + // ourselves are incompatible with that dep, so we know that deps + // parent conflict with us. 
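+                        // (Illustrative crate names: if `log 0.3.1` is cached
+                        // as conflicting with us, and something still in
+                        // `remaining_deps` wants `log ^0.3`, we can record the
+                        // failure now rather than discover it only after
+                        // activating the rest of the graph.)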
+ if !has_past_conflicting_dep { + if let Some(known_related_bad_deps) = + past_conflicting_activations.dependencies_conflicting_with(&pid) + { + if let Some((other_parent, conflict)) = remaining_deps + .iter() + .flat_map(|other| other.flatten()) + // for deps related to us + .filter(|&(_, ref other_dep)| + known_related_bad_deps.contains(other_dep)) + .filter_map(|(other_parent, other_dep)| { + past_conflicting_activations + .find_conflicting( + &cx, + &other_dep, + |con| con.contains_key(&pid) + ) + .map(|con| (other_parent, con)) + }) + .next() + { + let rel = conflict.get(&pid).unwrap().clone(); + + // The conflict we found is + // "other dep will not succeed if we are activated." + // We want to add + // "our dep will not succeed if other dep is in remaining_deps" + // but that is not how the cache is set up. + // So we add the less general but much faster, + // "our dep will not succeed if other dep's parent is activated". + conflicting_activations.extend( + conflict + .iter() + .filter(|&(p, _)| p != &pid) + .map(|(p, r)| (p.clone(), r.clone())), + ); + conflicting_activations.insert(other_parent.clone(), rel); + has_past_conflicting_dep = true; + } + } + } + + // Ok if we're in a "known failure" state for this frame we + // may want to skip it altogether though. We don't want to + // skip it though in the case that we're displaying error + // messages to the user! + // + // Here we need to figure out if the user will see if we + // skipped this candidate (if it's known to fail, aka has a + // conflicting dep and we're the last candidate). If we're + // here for the error messages, we can't skip it (but we can + // prune extra work). If we don't have any candidates in our + // backtrack stack then we're the last line of defense, so + // we'll want to present an error message for sure. + let activate_for_error_message = has_past_conflicting_dep && !has_another && { + just_here_for_the_error_messages || { + conflicting_activations + .extend(remaining_candidates.conflicting_prev_active.clone()); + find_candidate( + &mut backtrack_stack.clone(), + &parent, + &conflicting_activations, + ).is_none() + } + }; + + // If we're only here for the error messages then we know + // one of our candidate deps will fail, meaning we will + // fail and that none of the backtrack frames will find a + // candidate that will help. Consequently let's clean up the + // no longer needed backtrack frames. + if activate_for_error_message { + backtrack_stack.clear(); + } + + // If we don't know for a fact that we'll fail or if we're + // just here for the error message then we push this frame + // onto our list of to-be-resolve, which will generate more + // work for us later on. + // + // Otherwise we're guaranteed to fail and were not here for + // error messages, so we skip work and don't push anything + // onto our stack. + frame.just_for_error_messages = has_past_conflicting_dep; + if !has_past_conflicting_dep || activate_for_error_message { + remaining_deps.push(frame); + true + } else { + trace!( + "{}[{}]>{} skipping {} ", + parent.name(), + cur, + dep.name(), + pid.version() + ); + false + } + } + + // This candidate's already activated, so there's no extra work + // for us to do. Let's keep going. + Ok(None) => true, + + // We failed with a super fatal error (like a network error), so + // bail out as quickly as possible as we can't reliably + // backtrack from errors like these + Err(ActivateError::Fatal(e)) => return Err(e), + + // We failed due to a bland conflict, bah! 
Record this in our + // frame's list of conflicting activations as to why this + // candidate failed, and then move on. + Err(ActivateError::Conflict(id, reason)) => { + conflicting_activations.insert(id, reason); + false + } + }; + + // If we've successfully activated then save off the backtrack frame + // if one was created, and otherwise break out of the inner + // activation loop as we're ready to move to the next dependency + if successfully_activated { + backtrack_stack.extend(backtrack); + break; + } + + // We've failed to activate this dependency, oh dear! Our call to + // `activate` above may have altered our `cx` local variable, so + // restore it back if we've got a backtrack frame. + // + // If we don't have a backtrack frame then we're just using the `cx` + // for error messages anyway so we can live with a little + // imprecision. + if let Some(b) = backtrack { + cx = b.context_backup; + } + } + + // Ok phew, that loop was a big one! If we've broken out then we've + // successfully activated a candidate. Our stacks are all in place that + // we're ready to move on to the next dependency that needs activation, + // so loop back to the top of the function here. + } + + Ok(cx) +} + +/// Attempts to activate the summary `candidate` in the context `cx`. +/// +/// This function will pull dependency summaries from the registry provided, and +/// the dependencies of the package will be determined by the `method` provided. +/// If `candidate` was activated, this function returns the dependency frame to +/// iterate through next. +fn activate( + cx: &mut Context, + registry: &mut RegistryQueryer, + parent: Option<(&Summary, &Dependency)>, + candidate: Candidate, + method: &Method, +) -> ActivateResult> { + if let Some((parent, dep)) = parent { + cx.resolve_graph.push(GraphNode::Link( + parent.package_id().clone(), + candidate.summary.package_id().clone(), + dep.clone(), + )); + } + + let activated = cx.flag_activated(&candidate.summary, method)?; + + let candidate = match candidate.replace { + Some(replace) => { + cx.resolve_replacements.push(( + candidate.summary.package_id().clone(), + replace.package_id().clone(), + )); + if cx.flag_activated(&replace, method)? && activated { + return Ok(None); + } + trace!( + "activating {} (replacing {})", + replace.package_id(), + candidate.summary.package_id() + ); + replace + } + None => { + if activated { + return Ok(None); + } + trace!("activating {}", candidate.summary.package_id()); + candidate.summary + } + }; + + let now = Instant::now(); + let deps = cx.build_deps(registry, parent.map(|p| p.0), &candidate, method)?; + let frame = DepsFrame { + parent: candidate, + just_for_error_messages: false, + remaining_siblings: RcVecIter::new(Rc::new(deps)), + }; + Ok(Some((frame, now.elapsed()))) +} + +#[derive(Clone)] +struct BacktrackFrame { + cur: usize, + context_backup: Context, + deps_backup: BinaryHeap, + remaining_candidates: RemainingCandidates, + parent: Summary, + dep: Dependency, + features: Rc>, + conflicting_activations: HashMap, +} + +/// A helper "iterator" used to extract candidates within a current `Context` of +/// a dependency graph. +/// +/// This struct doesn't literally implement the `Iterator` trait (requires a few +/// more inputs) but in general acts like one. Each `RemainingCandidates` is +/// created with a list of candidates to choose from. When attempting to iterate +/// over the list of candidates only *valid* candidates are returned. Validity +/// is defined within a `Context`. 
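+/// (Concretely, `next` rejects a candidate that would duplicate an already
+/// activated semver-compatible version, or that clashes with an activated
+/// package over its `links` key.)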
+///
+/// Candidates passed to `new` may not be returned from `next` as they could be
+/// filtered out. If iteration stops early, a map of all the packages which
+/// caused candidates to be filtered out is returned.
+#[derive(Clone)]
+struct RemainingCandidates {
+    remaining: RcVecIter<Candidate>,
+    // note: change to RcList or something if clone is too expensive
+    conflicting_prev_active: HashMap<PackageId, ConflictReason>,
+    // This is an inlined peekable generator.
+    has_another: Option<Candidate>,
+}
+
+impl RemainingCandidates {
+    fn new(candidates: &Rc<Vec<Candidate>>) -> RemainingCandidates {
+        RemainingCandidates {
+            remaining: RcVecIter::new(Rc::clone(candidates)),
+            conflicting_prev_active: HashMap::new(),
+            has_another: None,
+        }
+    }
+
+    /// Attempts to find another candidate to check from this list.
+    ///
+    /// This method will attempt to move this iterator forward, returning a
+    /// candidate that's possible to activate. The `cx` argument is the current
+    /// context which determines validity for candidates returned, and the `dep`
+    /// is the dependency listing that we're activating for.
+    ///
+    /// If successful a `(Candidate, bool)` pair will be returned. The
+    /// `Candidate` is the candidate to attempt to activate, and the `bool` is
+    /// an indicator of whether there are remaining candidates to try or if
+    /// we've reached the end of iteration.
+    ///
+    /// If we've reached the end of the iterator here then `Err` will be
+    /// returned. The error will contain a map of package id to conflict reason,
+    /// where each package id caused a candidate to be filtered out from the
+    /// original list for the reason listed.
+    fn next(
+        &mut self,
+        cx: &Context,
+        dep: &Dependency,
+    ) -> Result<(Candidate, bool), HashMap<PackageId, ConflictReason>> {
+        let prev_active = cx.prev_active(dep);
+
+        for (_, b) in self.remaining.by_ref() {
+            // The `links` key in the manifest dictates that there's only one
+            // package in a dependency graph, globally, with that particular
+            // `links` key. If this candidate links to something that's already
+            // linked to by a different package then we've gotta skip this.
+            if let Some(link) = b.summary.links() {
+                if let Some(a) = cx.links.get(&link) {
+                    if a != b.summary.package_id() {
+                        self.conflicting_prev_active
+                            .entry(a.clone())
+                            .or_insert_with(|| ConflictReason::Links(link.to_string()));
+                        continue;
+                    }
+                }
+            }
+
+            // Otherwise the condition for being a valid candidate relies on
+            // semver. Cargo dictates that you can't duplicate multiple
+            // semver-compatible versions of a crate. For example we can't
+            // simultaneously activate `foo 1.0.2` and `foo 1.2.0`. We can,
+            // however, activate `1.0.2` and `2.0.0`.
+            //
+            // Here we throw out our candidate if it's *compatible* with, yet
+            // not equal to, a previously activated version.
+            if let Some(a) = prev_active
+                .iter()
+                .find(|a| compatible(a.version(), b.summary.version()))
+            {
+                if *a != b.summary {
+                    self.conflicting_prev_active
+                        .entry(a.package_id().clone())
+                        .or_insert(ConflictReason::Semver);
+                    continue;
+                }
+            }
+
+            // Well if we made it this far then we've got a valid dependency. We
+            // want this iterator to be inherently "peekable" so we don't
+            // necessarily return the item just yet. Instead we stash it away to
+            // get returned later, and if we replaced something then that was
+            // actually the candidate to try first so we return that.
+            if let Some(r) = mem::replace(&mut self.has_another, Some(b)) {
+                return Ok((r, true));
+            }
+        }
+
+        // Alright we've entirely exhausted our list of candidates.
If we've got + // something stashed away return that here (also indicating that there's + // nothing else). If nothing is stashed away we return the list of all + // conflicting activations, if any. + // + // TODO: can the `conflicting_prev_active` clone be avoided here? should + // panic if this is called twice and an error is already returned + self.has_another + .take() + .map(|r| (r, false)) + .ok_or_else(|| self.conflicting_prev_active.clone()) + } +} + +// Returns if `a` and `b` are compatible in the semver sense. This is a +// commutative operation. +// +// Versions `a` and `b` are compatible if their left-most nonzero digit is the +// same. +fn compatible(a: &semver::Version, b: &semver::Version) -> bool { + if a.major != b.major { + return false; + } + if a.major != 0 { + return true; + } + if a.minor != b.minor { + return false; + } + if a.minor != 0 { + return true; + } + a.patch == b.patch +} + +/// Looks through the states in `backtrack_stack` for dependencies with +/// remaining candidates. For each one, also checks if rolling back +/// could change the outcome of the failed resolution that caused backtracking +/// in the first place. Namely, if we've backtracked past the parent of the +/// failed dep, or any of the packages flagged as giving us trouble in +/// `conflicting_activations`. +/// +/// Read +/// For several more detailed explanations of the logic here. +fn find_candidate( + backtrack_stack: &mut Vec, + parent: &Summary, + conflicting_activations: &HashMap, +) -> Option<(Candidate, bool, BacktrackFrame)> { + while let Some(mut frame) = backtrack_stack.pop() { + let next = frame + .remaining_candidates + .next(&frame.context_backup, &frame.dep); + let (candidate, has_another) = match next { + Ok(pair) => pair, + Err(_) => continue, + }; + // When we're calling this method we know that `parent` failed to + // activate. That means that some dependency failed to get resolved for + // whatever reason, and all of those reasons (plus maybe some extras) + // are listed in `conflicting_activations`. + // + // This means that if all members of `conflicting_activations` are still + // active in this back up we know that we're guaranteed to not actually + // make any progress. As a result if we hit this condition we can + // completely skip this backtrack frame and move on to the next. + if frame + .context_backup + .is_conflicting(Some(parent.package_id()), conflicting_activations) + { + continue; + } + + return Some((candidate, has_another, frame)); + } + None +} + +fn activation_error( + cx: &Context, + registry: &mut Registry, + parent: &Summary, + dep: &Dependency, + conflicting_activations: &HashMap, + candidates: &[Candidate], + config: Option<&Config>, +) -> CargoError { + let graph = cx.graph(); + if !candidates.is_empty() { + let mut msg = format!("failed to select a version for `{}`.", dep.name()); + msg.push_str("\n ... required by "); + msg.push_str(&describe_path(&graph.path_to_top(parent.package_id()))); + + msg.push_str("\nversions that meet the requirements `"); + msg.push_str(&dep.version_req().to_string()); + msg.push_str("` are: "); + msg.push_str(&candidates + .iter() + .map(|v| v.summary.version()) + .map(|v| v.to_string()) + .collect::>() + .join(", ")); + + let mut conflicting_activations: Vec<_> = conflicting_activations.iter().collect(); + conflicting_activations.sort_unstable(); + let (links_errors, mut other_errors): (Vec<_>, Vec<_>) = conflicting_activations + .drain(..) 
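+        // (sorted above, reversed here so the entries come out in descending
+        // package-id order; the `links` conflicts are then split out and
+        // reported first)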
+ .rev() + .partition(|&(_, r)| r.is_links()); + + for &(p, r) in links_errors.iter() { + if let ConflictReason::Links(ref link) = *r { + msg.push_str("\n\nthe package `"); + msg.push_str(&*dep.name()); + msg.push_str("` links to the native library `"); + msg.push_str(link); + msg.push_str("`, but it conflicts with a previous package which links to `"); + msg.push_str(link); + msg.push_str("` as well:\n"); + } + msg.push_str(&describe_path(&graph.path_to_top(p))); + } + + let (features_errors, other_errors): (Vec<_>, Vec<_>) = other_errors + .drain(..) + .partition(|&(_, r)| r.is_missing_features()); + + for &(p, r) in features_errors.iter() { + if let ConflictReason::MissingFeatures(ref features) = *r { + msg.push_str("\n\nthe package `"); + msg.push_str(&*p.name()); + msg.push_str("` depends on `"); + msg.push_str(&*dep.name()); + msg.push_str("`, with features: `"); + msg.push_str(features); + msg.push_str("` but `"); + msg.push_str(&*dep.name()); + msg.push_str("` does not have these features.\n"); + } + // p == parent so the full path is redundant. + } + + if !other_errors.is_empty() { + msg.push_str( + "\n\nall possible versions conflict with \ + previously selected packages.", + ); + } + + for &(p, _) in other_errors.iter() { + msg.push_str("\n\n previously selected "); + msg.push_str(&describe_path(&graph.path_to_top(p))); + } + + msg.push_str("\n\nfailed to select a version for `"); + msg.push_str(&*dep.name()); + msg.push_str("` which could resolve this conflict"); + + return format_err!("{}", msg); + } + + // Once we're all the way down here, we're definitely lost in the + // weeds! We didn't actually find any candidates, so we need to + // give an error message that nothing was found. + // + // Note that we re-query the registry with a new dependency that + // allows any version so we can give some nicer error reporting + // which indicates a few versions that were actually found. + let all_req = semver::VersionReq::parse("*").unwrap(); + let mut new_dep = dep.clone(); + new_dep.set_version_req(all_req); + let mut candidates = match registry.query_vec(&new_dep) { + Ok(candidates) => candidates, + Err(e) => return e, + }; + candidates.sort_unstable_by(|a, b| b.version().cmp(a.version())); + + let mut msg = if !candidates.is_empty() { + let versions = { + let mut versions = candidates + .iter() + .take(3) + .map(|cand| cand.version().to_string()) + .collect::>(); + + if candidates.len() > 3 { + versions.push("...".into()); + } + + versions.join(", ") + }; + + let mut msg = format!( + "no matching version `{}` found for package `{}`\n\ + location searched: {}\n\ + versions found: {}\n", + dep.version_req(), + dep.name(), + dep.source_id(), + versions + ); + msg.push_str("required by "); + msg.push_str(&describe_path(&graph.path_to_top(parent.package_id()))); + + // If we have a path dependency with a locked version, then this may + // indicate that we updated a sub-package and forgot to run `cargo + // update`. In this case try to print a helpful error! 
+ if dep.source_id().is_path() && dep.version_req().to_string().starts_with('=') {
+ msg.push_str(
+ "\nconsider running `cargo update` to update \
+ a path dependency's locked version",
+ );
+ }
+
+ msg
+ } else {
+ let mut msg = format!(
+ "no matching package named `{}` found\n\
+ location searched: {}\n",
+ dep.name(),
+ dep.source_id()
+ );
+ msg.push_str("required by ");
+ msg.push_str(&describe_path(&graph.path_to_top(parent.package_id())));
+
+ msg
+ };
+
+ if let Some(config) = config {
+ if config.cli_unstable().offline {
+ msg.push_str(
+ "\nAs a reminder, you're using offline mode (-Z offline) \
+ which can sometimes cause surprising resolution failures; \
+ if this error is too confusing you may wish to retry \
+ without the offline flag.",
+ );
+ }
+ }
+
+ format_err!("{}", msg)
+}
+
+/// Returns a String representation of the dependency chain for a particular
+/// `pkgid`.
+fn describe_path(path: &[&PackageId]) -> String {
+ use std::fmt::Write;
+ let mut dep_path_desc = format!("package `{}`", path[0]);
+ for dep in path[1..].iter() {
+ write!(dep_path_desc, "\n ... which is depended on by `{}`", dep).unwrap();
+ }
+ dep_path_desc
+}
+
+fn check_cycles(resolve: &Resolve, activations: &Activations) -> CargoResult<()> {
+ let summaries: HashMap<&PackageId, &Summary> = activations
+ .values()
+ .flat_map(|v| v.iter())
+ .map(|s| (s.package_id(), s))
+ .collect();
+
+ // Sort packages to produce user friendly deterministic errors.
+ let all_packages = resolve.iter().collect::<BinaryHeap<_>>().into_sorted_vec();
+ let mut checked = HashSet::new();
+ for pkg in all_packages {
+ if !checked.contains(pkg) {
+ visit(resolve, pkg, &summaries, &mut HashSet::new(), &mut checked)?
+ }
+ }
+ return Ok(());
+
+ fn visit<'a>(
+ resolve: &'a Resolve,
+ id: &'a PackageId,
+ summaries: &HashMap<&'a PackageId, &Summary>,
+ visited: &mut HashSet<&'a PackageId>,
+ checked: &mut HashSet<&'a PackageId>,
+ ) -> CargoResult<()> {
+ // See if we visited ourselves
+ if !visited.insert(id) {
+ bail!(
+ "cyclic package dependency: package `{}` depends on itself. Cycle:\n{}",
+ id,
+ describe_path(&resolve.path_to_top(id))
+ );
+ }
+
+ // If we've already checked this node no need to recurse again as we'll
+ // just conclude the same thing as last time, so we only execute the
+ // recursive step if we successfully insert into `checked`.
+ //
+ // Note that if we hit an intransitive dependency then we clear out the
+ // visitation list as we can't induce a cycle through transitive
+ // dependencies.
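+ // For example (illustrative only): if package `a` lists `b` under
+ // `[dev-dependencies]` while `b` depends on `a`, the fresh `visited` set
+ // used for the intransitive edge means no cycle is reported -- a dev
+ // dependency cannot induce a real build cycle.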
+ if checked.insert(id) {
+ let summary = summaries[id];
+ for dep in resolve.deps_not_replaced(id) {
+ let is_transitive = summary
+ .dependencies()
+ .iter()
+ .any(|d| d.matches_id(dep) && d.is_transitive());
+ let mut empty = HashSet::new();
+ let visited = if is_transitive {
+ &mut *visited
+ } else {
+ &mut empty
+ };
+ visit(resolve, dep, summaries, visited, checked)?;
+
+ if let Some(id) = resolve.replacement(dep) {
+ visit(resolve, id, summaries, visited, checked)?;
+ }
+ }
+ }
+
+ // Ok, we're done, no longer visiting our node any more
+ visited.remove(id);
+ Ok(())
+ }
+}
diff --git a/src/cargo/core/resolver/resolve.rs b/src/cargo/core/resolver/resolve.rs
new file mode 100644
index 000000000..728f693bf
--- /dev/null
+++ b/src/cargo/core/resolver/resolve.rs
@@ -0,0 +1,292 @@
+use std::collections::{HashMap, HashSet};
+use std::fmt;
+use std::iter::FromIterator;
+
+use url::Url;
+
+use core::{Dependency, PackageId, PackageIdSpec, Summary};
+use util::Graph;
+use util::errors::CargoResult;
+use util::graph::{Edges, Nodes};
+
+use super::encode::Metadata;
+
+/// Represents a fully resolved package dependency graph. Each node in the graph
+/// is a package and edges represent dependencies between packages.
+///
+/// Each instance of `Resolve` also understands the full set of features used
+/// for each package.
+#[derive(PartialEq)]
+pub struct Resolve {
+ /// A graph, whose vertices are packages and edges are dependency specifications
+ /// from Cargo.toml. We need a `Vec<Dependency>` here because the same package
+ /// might be present in both `[dependencies]` and `[build-dependencies]`.
+ graph: Graph<PackageId, Vec<Dependency>>,
+ replacements: HashMap<PackageId, PackageId>,
+ reverse_replacements: HashMap<PackageId, PackageId>,
+ empty_features: HashSet<String>,
+ features: HashMap<PackageId, HashSet<String>>,
+ checksums: HashMap<PackageId, Option<String>>,
+ metadata: Metadata,
+ unused_patches: Vec<PackageId>,
+}
+
+impl Resolve {
+ pub fn new(
+ graph: Graph<PackageId, Vec<Dependency>>,
+ replacements: HashMap<PackageId, PackageId>,
+ features: HashMap<PackageId, HashSet<String>>,
+ checksums: HashMap<PackageId, Option<String>>,
+ metadata: Metadata,
+ unused_patches: Vec<PackageId>,
+ ) -> Resolve {
+ let reverse_replacements = replacements
+ .iter()
+ .map(|p| (p.1.clone(), p.0.clone()))
+ .collect();
+ Resolve {
+ graph,
+ replacements,
+ features,
+ checksums,
+ metadata,
+ unused_patches,
+ empty_features: HashSet::new(),
+ reverse_replacements,
+ }
+ }
+
+ /// Resolves one of the paths from the given dependent package up to
+ /// the root.
+ pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> {
+ self.graph.path_to_top(pkg)
+ }
+
+ pub fn register_used_patches(&mut self, patches: &HashMap<Url, Vec<Summary>>) {
+ for summary in patches.values().flat_map(|v| v) {
+ if self.iter().any(|id| id == summary.package_id()) {
+ continue;
+ }
+ self.unused_patches.push(summary.package_id().clone());
+ }
+ }
+
+ pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
+ // Given a previous instance of resolve, it should be forbidden to ever
+ // have checksums which *differ*. If the same package id has differing
+ // checksums, then something has gone wrong such as:
+ //
+ // * Something got seriously corrupted
+ // * A "mirror" isn't actually a mirror as some changes were made
+ // * A replacement source wasn't actually a replacement; some changes
+ // were made
+ //
+ // In all of these cases, we want to report an error to indicate that
+ // something is awry. Normal execution (esp just using crates.io) should
+ // never run into this.
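+ // Sketch of the cases handled below (`cksum` = lock file checksum,
+ // `mine` = current checksum; illustrative summary, not from the
+ // original source):
+ //
+ // previous == current -> ok, nothing to do
+ // previous None, current Some -> "not previously calculated" error
+ // previous Some, current None -> "could not be calculated" error
+ // both Some but different -> "changed between lock files" error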
+ for (id, cksum) in previous.checksums.iter() {
+ if let Some(mine) = self.checksums.get(id) {
+ if mine == cksum {
+ continue;
+ }
+
+ // If the previous checksum wasn't calculated, the current
+ // checksum is `Some`. This may indicate that a source was
+ // erroneously replaced or was replaced with something that
+ // desires stronger checksum guarantees than can be afforded
+ // elsewhere.
+ if cksum.is_none() {
+ bail!(
+ "\
+checksum for `{}` was not previously calculated, but a checksum could now \
+be calculated

+this could be indicative of a few possible situations:

+ * the source `{}` did not previously support checksums,
+ but was replaced with one that does
+ * newer Cargo implementations know how to checksum this source, but this
+ older implementation does not
+ * the lock file is corrupt
+",
+ id,
+ id.source_id()
+ )

+ // If our checksum hasn't been calculated, then it could mean
+ // that future Cargo figured out how to checksum something or
+ // more realistically we were overridden with a source that does
+ // not have checksums.
+ } else if mine.is_none() {
+ bail!(
+ "\
+checksum for `{}` could not be calculated, but a checksum is listed in \
+the existing lock file

+this could be indicative of a few possible situations:

+ * the source `{}` supports checksums,
+ but was replaced with one that doesn't
+ * the lock file is corrupt

+unable to verify that `{0}` is the same as when the lockfile was generated
+",
+ id,
+ id.source_id()
+ )

+ // If the checksums aren't equal, and neither is None, then they
+ // must both be Some, in which case the checksum now differs.
+ // That's quite bad!
+ } else {
+ bail!(
+ "\
+checksum for `{}` changed between lock files

+this could be indicative of a few possible errors:

+ * the lock file is corrupt
+ * a replacement source in use (e.g. a mirror) returned a different checksum
+ * the source itself may be corrupt in one way or another

+unable to verify that `{0}` is the same as when the lockfile was generated
+",
+ id
+ );
+ }
+ }
+ }
+
+ // Be sure to just copy over any unknown metadata.
+ self.metadata = previous.metadata.clone();
+ Ok(())
+ }
+
+ pub fn iter(&self) -> Nodes<PackageId, Vec<Dependency>> {
+ self.graph.iter()
+ }
+
+ pub fn deps(&self, pkg: &PackageId) -> Deps {
+ Deps {
+ edges: self.graph.edges(pkg),
+ resolve: self,
+ }
+ }
+
+ pub fn deps_not_replaced(&self, pkg: &PackageId) -> DepsNotReplaced {
+ DepsNotReplaced {
+ edges: self.graph.edges(pkg),
+ }
+ }
+
+ pub fn replacement(&self, pkg: &PackageId) -> Option<&PackageId> {
+ self.replacements.get(pkg)
+ }
+
+ pub fn replacements(&self) -> &HashMap<PackageId, PackageId> {
+ &self.replacements
+ }
+
+ pub fn features(&self, pkg: &PackageId) -> &HashSet<String> {
+ self.features.get(pkg).unwrap_or(&self.empty_features)
+ }
+
+ pub fn features_sorted(&self, pkg: &PackageId) -> Vec<&str> {
+ let mut v = Vec::from_iter(self.features(pkg).iter().map(|s| s.as_ref()));
+ v.sort();
+ v
+ }
+
+ pub fn query(&self, spec: &str) -> CargoResult<&PackageId> {
+ PackageIdSpec::query_str(spec, self.iter())
+ }
+
+ pub fn unused_patches(&self) -> &[PackageId] {
+ &self.unused_patches
+ }
+
+ pub fn checksums(&self) -> &HashMap<PackageId, Option<String>> {
+ &self.checksums
+ }
+
+ pub fn metadata(&self) -> &Metadata {
+ &self.metadata
+ }
+
+ pub fn dependencies_listed(&self, from: &PackageId, to: &PackageId) -> &[Dependency] {
+ // We've got a dependency on `from` to `to`, but this dependency edge
+ // may be affected by [replace].
If the `to` package is listed as the
+ // target of a replacement (aka the key of a reverse replacement map)
+ // then we try to find our dependency edge through that. If that fails
+ // then we go down below assuming it's not replaced.
+ //
+ // Note that we don't treat `from` as if it's been replaced because
+ // that's where the dependency originates from, and we only replace
+ // targets of dependencies not the originator.
+ if let Some(replace) = self.reverse_replacements.get(to) {
+ if let Some(deps) = self.graph.edge(from, replace) {
+ return deps;
+ }
+ }
+ match self.graph.edge(from, to) {
+ Some(ret) => ret,
+ None => panic!("no Dependency listed for `{}` => `{}`", from, to),
+ }
+ }
+}
+
+impl fmt::Debug for Resolve {
+ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
+ write!(fmt, "graph: {:?}\n", self.graph)?;
+ write!(fmt, "\nfeatures: {{\n")?;
+ for (pkg, features) in &self.features {
+ write!(fmt, " {}: {:?}\n", pkg, features)?;
+ }
+ write!(fmt, "}}")
+ }
+}
+
+pub struct Deps<'a> {
+ edges: Option<Edges<'a, PackageId, Vec<Dependency>>>,
+ resolve: &'a Resolve,
+}
+
+impl<'a> Iterator for Deps<'a> {
+ type Item = (&'a PackageId, &'a [Dependency]);
+
+ fn next(&mut self) -> Option<(&'a PackageId, &'a [Dependency])> {
+ let (id, deps) = self.edges.as_mut()?.next()?;
+ let id_ret = self.resolve.replacement(id).unwrap_or(id);
+ Some((id_ret, deps))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Note: Edges is actually a std::collections::hash_set::Iter, which
+ // is an ExactSizeIterator.
+ let len = self.edges.as_ref().map(ExactSizeIterator::len).unwrap_or(0);
+ (len, Some(len))
+ }
+}
+
+impl<'a> ExactSizeIterator for Deps<'a> {}
+
+pub struct DepsNotReplaced<'a> {
+ edges: Option<Edges<'a, PackageId, Vec<Dependency>>>,
+}
+
+impl<'a> Iterator for DepsNotReplaced<'a> {
+ type Item = &'a PackageId;
+
+ fn next(&mut self) -> Option<&'a PackageId> {
+ Some(self.edges.as_mut()?.next()?.0)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Note: Edges is actually a std::collections::hash_set::Iter, which
+ // is an ExactSizeIterator.
+ let len = self.edges.as_ref().map(ExactSizeIterator::len).unwrap_or(0);
+ (len, Some(len))
+ }
+}
+
+impl<'a> ExactSizeIterator for DepsNotReplaced<'a> {}
diff --git a/src/cargo/core/resolver/types.rs b/src/cargo/core/resolver/types.rs
new file mode 100644
index 000000000..543c72e7e
--- /dev/null
+++ b/src/cargo/core/resolver/types.rs
@@ -0,0 +1,398 @@
+use std::cmp::Ordering;
+use std::collections::{HashMap, HashSet};
+use std::ops::Range;
+use std::rc::Rc;
+
+use core::{Dependency, PackageId, PackageIdSpec, Registry, Summary};
+use core::interning::InternedString;
+use util::{CargoError, CargoResult};
+
+pub struct RegistryQueryer<'a> {
+ pub registry: &'a mut (Registry + 'a),
+ replacements: &'a [(PackageIdSpec, Dependency)],
+ try_to_use: &'a HashSet<&'a PackageId>,
+ // TODO: with nll the Rc can be removed
+ cache: HashMap<Dependency, Rc<Vec<Candidate>>>,
+ // If set, the list of dependency candidates will be sorted by minimal
+ // versions first. That allows `cargo update -Z minimal-versions` which will
+ // specify minimum dependency versions to be used.
+ minimal_versions: bool,
+}
+
+impl<'a> RegistryQueryer<'a> {
+ pub fn new(
+ registry: &'a mut Registry,
+ replacements: &'a [(PackageIdSpec, Dependency)],
+ try_to_use: &'a HashSet<&'a PackageId>,
+ minimal_versions: bool,
+ ) -> Self {
+ RegistryQueryer {
+ registry,
+ replacements,
+ cache: HashMap::new(),
+ try_to_use,
+ minimal_versions,
+ }
+ }
+
+ /// Queries the `registry` to return a list of candidates for `dep`.
+ /// + /// This method is the location where overrides are taken into account. If + /// any candidates are returned which match an override then the override is + /// applied by performing a second query for what the override should + /// return. + pub fn query(&mut self, dep: &Dependency) -> CargoResult>> { + if let Some(out) = self.cache.get(dep).cloned() { + return Ok(out); + } + + let mut ret = Vec::new(); + self.registry.query(dep, &mut |s| { + ret.push(Candidate { + summary: s, + replace: None, + }); + })?; + for candidate in ret.iter_mut() { + let summary = &candidate.summary; + + let mut potential_matches = self.replacements + .iter() + .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); + + let &(ref spec, ref dep) = match potential_matches.next() { + None => continue, + Some(replacement) => replacement, + }; + debug!("found an override for {} {}", dep.name(), dep.version_req()); + + let mut summaries = self.registry.query_vec(dep)?.into_iter(); + let s = summaries.next().ok_or_else(|| { + format_err!( + "no matching package for override `{}` found\n\ + location searched: {}\n\ + version required: {}", + spec, + dep.source_id(), + dep.version_req() + ) + })?; + let summaries = summaries.collect::>(); + if !summaries.is_empty() { + let bullets = summaries + .iter() + .map(|s| format!(" * {}", s.package_id())) + .collect::>(); + bail!( + "the replacement specification `{}` matched \ + multiple packages:\n * {}\n{}", + spec, + s.package_id(), + bullets.join("\n") + ); + } + + // The dependency should be hard-coded to have the same name and an + // exact version requirement, so both of these assertions should + // never fail. + assert_eq!(s.version(), summary.version()); + assert_eq!(s.name(), summary.name()); + + let replace = if s.source_id() == summary.source_id() { + debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); + None + } else { + Some(s) + }; + let matched_spec = spec.clone(); + + // Make sure no duplicates + if let Some(&(ref spec, _)) = potential_matches.next() { + bail!( + "overlapping replacement specifications found:\n\n \ + * {}\n * {}\n\nboth specifications match: {}", + matched_spec, + spec, + summary.package_id() + ); + } + + for dep in summary.dependencies() { + debug!("\t{} => {}", dep.name(), dep.version_req()); + } + + candidate.replace = replace; + } + + // When we attempt versions for a package we'll want to do so in a + // sorted fashion to pick the "best candidates" first. Currently we try + // prioritized summaries (those in `try_to_use`) and failing that we + // list everything from the maximum version to the lowest version. + ret.sort_unstable_by(|a, b| { + let a_in_previous = self.try_to_use.contains(a.summary.package_id()); + let b_in_previous = self.try_to_use.contains(b.summary.package_id()); + let previous_cmp = a_in_previous.cmp(&b_in_previous).reverse(); + match previous_cmp { + Ordering::Equal => { + let cmp = a.summary.version().cmp(b.summary.version()); + if self.minimal_versions { + // Lower version ordered first. + cmp + } else { + // Higher version ordered first. + cmp.reverse() + } + } + _ => previous_cmp, + } + }); + + let out = Rc::new(ret); + + self.cache.insert(dep.clone(), out.clone()); + + Ok(out) + } +} + +#[derive(Clone, Copy)] +pub enum Method<'a> { + Everything, // equivalent to Required { dev_deps: true, all_features: true, .. 
} + Required { + dev_deps: bool, + features: &'a [InternedString], + all_features: bool, + uses_default_features: bool, + }, +} + +impl<'r> Method<'r> { + pub fn split_features(features: &[String]) -> Vec { + features + .iter() + .flat_map(|s| s.split_whitespace()) + .flat_map(|s| s.split(',')) + .filter(|s| !s.is_empty()) + .map(|s| InternedString::new(s)) + .collect::>() + } +} + +#[derive(Clone)] +pub struct Candidate { + pub summary: Summary, + pub replace: Option, +} + +#[derive(Clone)] +pub struct DepsFrame { + pub parent: Summary, + pub just_for_error_messages: bool, + pub remaining_siblings: RcVecIter, +} + +impl DepsFrame { + /// Returns the least number of candidates that any of this frame's siblings + /// has. + /// + /// The `remaining_siblings` array is already sorted with the smallest + /// number of candidates at the front, so we just return the number of + /// candidates in that entry. + fn min_candidates(&self) -> usize { + self.remaining_siblings + .clone() + .next() + .map(|(_, (_, candidates, _))| candidates.len()) + .unwrap_or(0) + } + + pub fn flatten<'s>(&'s self) -> Box + 's> { + // TODO: with impl Trait the Box can be removed + Box::new( + self.remaining_siblings + .clone() + .map(move |(_, (d, _, _))| (self.parent.package_id(), d)), + ) + } +} + +impl PartialEq for DepsFrame { + fn eq(&self, other: &DepsFrame) -> bool { + self.just_for_error_messages == other.just_for_error_messages + && self.min_candidates() == other.min_candidates() + } +} + +impl Eq for DepsFrame {} + +impl PartialOrd for DepsFrame { + fn partial_cmp(&self, other: &DepsFrame) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for DepsFrame { + fn cmp(&self, other: &DepsFrame) -> Ordering { + self.just_for_error_messages + .cmp(&other.just_for_error_messages) + .then_with(|| + // the frame with the sibling that has the least number of candidates + // needs to get bubbled up to the top of the heap we use below, so + // reverse comparison here. + self.min_candidates().cmp(&other.min_candidates()).reverse()) + } +} + +// Information about the dependencies for a crate, a tuple of: +// +// (dependency info, candidates, features activated) +pub type DepInfo = (Dependency, Rc>, Rc>); + +pub type ActivateResult = Result; + +pub enum ActivateError { + Fatal(CargoError), + Conflict(PackageId, ConflictReason), +} + +impl From<::failure::Error> for ActivateError { + fn from(t: ::failure::Error) -> Self { + ActivateError::Fatal(t) + } +} + +impl From<(PackageId, ConflictReason)> for ActivateError { + fn from(t: (PackageId, ConflictReason)) -> Self { + ActivateError::Conflict(t.0, t.1) + } +} + +/// All possible reasons that a package might fail to activate. +/// +/// We maintain a list of conflicts for error reporting as well as backtracking +/// purposes. Each reason here is why candidates may be rejected or why we may +/// fail to resolve a dependency. +#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] +pub enum ConflictReason { + /// There was a semver conflict, for example we tried to activate a package + /// 1.0.2 but 1.1.0 was already activated (aka a compatible semver version + /// is already activated) + Semver, + + /// The `links` key is being violated. For example one crate in the + /// dependency graph has `links = "foo"` but this crate also had that, and + /// we're only allowed one per dependency graph. + Links(String), + + /// A dependency listed features that weren't actually available on the + /// candidate. 
For example we tried to activate feature `foo` but the
+ /// candidate we're activating didn't actually have the feature `foo`.
+ MissingFeatures(String),
+}
+
+impl ConflictReason {
+ pub fn is_links(&self) -> bool {
+ if let ConflictReason::Links(_) = *self {
+ return true;
+ }
+ false
+ }
+
+ pub fn is_missing_features(&self) -> bool {
+ if let ConflictReason::MissingFeatures(_) = *self {
+ return true;
+ }
+ false
+ }
+}
+
+pub struct RcVecIter<T> {
+ vec: Rc<Vec<T>>,
+ rest: Range<usize>,
+}
+
+impl<T> RcVecIter<T> {
+ pub fn new(vec: Rc<Vec<T>>) -> RcVecIter<T> {
+ RcVecIter {
+ rest: 0..vec.len(),
+ vec,
+ }
+ }
+}
+
+// Not derived to avoid `T: Clone`
+impl<T> Clone for RcVecIter<T> {
+ fn clone(&self) -> RcVecIter<T> {
+ RcVecIter {
+ vec: self.vec.clone(),
+ rest: self.rest.clone(),
+ }
+ }
+}
+
+impl<T> Iterator for RcVecIter<T>
+where
+ T: Clone,
+{
+ type Item = (usize, T);
+
+ fn next(&mut self) -> Option<(usize, T)> {
+ self.rest
+ .next()
+ .and_then(|i| self.vec.get(i).map(|val| (i, val.clone())))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // rest is a std::ops::Range, which is an ExactSizeIterator.
+ self.rest.size_hint()
+ }
+}
+
+impl<T: Clone> ExactSizeIterator for RcVecIter<T> {}
+
+pub struct RcList<T> {
+ pub head: Option<Rc<(T, RcList<T>)>>,
+}
+
+impl<T> RcList<T> {
+ pub fn new() -> RcList<T> {
+ RcList { head: None }
+ }
+
+ pub fn push(&mut self, data: T) {
+ let node = Rc::new((
+ data,
+ RcList {
+ head: self.head.take(),
+ },
+ ));
+ self.head = Some(node);
+ }
+}
+
+// Not derived to avoid `T: Clone`
+impl<T> Clone for RcList<T> {
+ fn clone(&self) -> RcList<T> {
+ RcList {
+ head: self.head.clone(),
+ }
+ }
+}
+
+// Avoid stack overflows on drop by turning recursion into a loop
+impl<T> Drop for RcList<T> {
+ fn drop(&mut self) {
+ let mut cur = self.head.take();
+ while let Some(head) = cur {
+ match Rc::try_unwrap(head) {
+ Ok((_data, mut next)) => cur = next.head.take(),
+ Err(_) => break,
+ }
+ }
+ }
+}
+
+pub enum GraphNode {
+ Add(PackageId),
+ Link(PackageId, PackageId, Dependency),
+}
diff --git a/src/cargo/core/shell.rs b/src/cargo/core/shell.rs
new file mode 100644
index 000000000..8599e9e6d
--- /dev/null
+++ b/src/cargo/core/shell.rs
@@ -0,0 +1,355 @@
+use std::fmt;
+use std::io::prelude::*;
+
+use atty;
+use termcolor::Color::{Cyan, Green, Red, Yellow};
+use termcolor::{self, Color, ColorSpec, StandardStream, WriteColor};
+
+use util::errors::CargoResult;
+
+/// The requested verbosity of output
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Verbosity {
+ Verbose,
+ Normal,
+ Quiet,
+}
+
+/// An abstraction around a `Write`able object that remembers preferences for output verbosity and
+/// color.
+pub struct Shell {
+ /// the `Write`able object, either with or without color support (represented by different enum
+ /// variants)
+ err: ShellOut,
+ /// How verbose messages should be
+ verbosity: Verbosity,
+}
+
+impl fmt::Debug for Shell {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.err {
+ ShellOut::Write(_) => f.debug_struct("Shell")
+ .field("verbosity", &self.verbosity)
+ .finish(),
+ ShellOut::Stream { color_choice, ..
} => f.debug_struct("Shell") + .field("verbosity", &self.verbosity) + .field("color_choice", &color_choice) + .finish(), + } + } +} + +/// A `Write`able object, either with or without color support +enum ShellOut { + /// A plain write object without color support + Write(Box), + /// Color-enabled stdio, with information on whether color should be used + Stream { + stream: StandardStream, + tty: bool, + color_choice: ColorChoice, + }, +} + +/// Whether messages should use color output +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum ColorChoice { + /// Force color output + Always, + /// Force disable color output + Never, + /// Intelligently guess whether to use color output + CargoAuto, +} + +impl Shell { + /// Create a new shell (color choice and verbosity), defaulting to 'auto' color and verbose + /// output. + pub fn new() -> Shell { + Shell { + err: ShellOut::Stream { + stream: StandardStream::stderr(ColorChoice::CargoAuto.to_termcolor_color_choice()), + color_choice: ColorChoice::CargoAuto, + tty: atty::is(atty::Stream::Stderr), + }, + verbosity: Verbosity::Verbose, + } + } + + /// Create a shell from a plain writable object, with no color, and max verbosity. + pub fn from_write(out: Box) -> Shell { + Shell { + err: ShellOut::Write(out), + verbosity: Verbosity::Verbose, + } + } + + /// Print a message, where the status will have `color` color, and can be justified. The + /// messages follows without color. + fn print( + &mut self, + status: &fmt::Display, + message: Option<&fmt::Display>, + color: Color, + justified: bool, + ) -> CargoResult<()> { + match self.verbosity { + Verbosity::Quiet => Ok(()), + _ => self.err.print(status, message, color, justified), + } + } + + /// Returns the width of the terminal in spaces, if any + pub fn err_width(&self) -> Option { + match self.err { + ShellOut::Stream { tty: true, .. } => imp::stderr_width(), + _ => None, + } + } + + /// Returns whether stderr is a tty + pub fn is_err_tty(&self) -> bool { + match self.err { + ShellOut::Stream { tty, .. } => tty, + _ => false, + } + } + + /// Get a reference to the underlying writer + pub fn err(&mut self) -> &mut Write { + self.err.as_write() + } + + /// Shortcut to right-align and color green a status message. + pub fn status(&mut self, status: T, message: U) -> CargoResult<()> + where + T: fmt::Display, + U: fmt::Display, + { + self.print(&status, Some(&message), Green, true) + } + + pub fn status_header(&mut self, status: T) -> CargoResult<()> + where + T: fmt::Display, + { + self.print(&status, None, Cyan, true) + } + + /// Shortcut to right-align a status message. + pub fn status_with_color( + &mut self, + status: T, + message: U, + color: Color, + ) -> CargoResult<()> + where + T: fmt::Display, + U: fmt::Display, + { + self.print(&status, Some(&message), color, true) + } + + /// Run the callback only if we are in verbose mode + pub fn verbose(&mut self, mut callback: F) -> CargoResult<()> + where + F: FnMut(&mut Shell) -> CargoResult<()>, + { + match self.verbosity { + Verbosity::Verbose => callback(self), + _ => Ok(()), + } + } + + /// Run the callback if we are not in verbose mode. 
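+ // Typical usage (illustrative, not from the original source):
+ //
+ // shell.verbose(|s| s.status("Compiling", "foo v0.1.0 (/path/to/foo)"))?;
+ // shell.concise(|s| s.status("Compiling", "foo v0.1.0"))?;
+ //
+ // Exactly one of the two callbacks runs for a given verbosity level.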
+ pub fn concise(&mut self, mut callback: F) -> CargoResult<()> + where + F: FnMut(&mut Shell) -> CargoResult<()>, + { + match self.verbosity { + Verbosity::Verbose => Ok(()), + _ => callback(self), + } + } + + /// Print a red 'error' message + pub fn error(&mut self, message: T) -> CargoResult<()> { + self.print(&"error:", Some(&message), Red, false) + } + + /// Print an amber 'warning' message + pub fn warn(&mut self, message: T) -> CargoResult<()> { + match self.verbosity { + Verbosity::Quiet => Ok(()), + _ => self.print(&"warning:", Some(&message), Yellow, false), + } + } + + /// Update the verbosity of the shell + pub fn set_verbosity(&mut self, verbosity: Verbosity) { + self.verbosity = verbosity; + } + + /// Get the verbosity of the shell + pub fn verbosity(&self) -> Verbosity { + self.verbosity + } + + /// Update the color choice (always, never, or auto) from a string. + pub fn set_color_choice(&mut self, color: Option<&str>) -> CargoResult<()> { + if let ShellOut::Stream { + ref mut stream, + ref mut color_choice, + .. + } = self.err + { + let cfg = match color { + Some("always") => ColorChoice::Always, + Some("never") => ColorChoice::Never, + + Some("auto") | None => ColorChoice::CargoAuto, + + Some(arg) => bail!( + "argument for --color must be auto, always, or \ + never, but found `{}`", + arg + ), + }; + *color_choice = cfg; + *stream = StandardStream::stderr(cfg.to_termcolor_color_choice()); + } + Ok(()) + } + + /// Get the current color choice + /// + /// If we are not using a color stream, this will always return Never, even if the color choice + /// has been set to something else. + pub fn color_choice(&self) -> ColorChoice { + match self.err { + ShellOut::Stream { color_choice, .. } => color_choice, + ShellOut::Write(_) => ColorChoice::Never, + } + } +} + +impl Default for Shell { + fn default() -> Self { + Self::new() + } +} + +impl ShellOut { + /// Print out a message with a status. The status comes first and is bold + the given color. + /// The status can be justified, in which case the max width that will right align is 12 chars. + fn print( + &mut self, + status: &fmt::Display, + message: Option<&fmt::Display>, + color: Color, + justified: bool, + ) -> CargoResult<()> { + match *self { + ShellOut::Stream { ref mut stream, .. } => { + stream.reset()?; + stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(color)))?; + if justified { + write!(stream, "{:>12}", status)?; + } else { + write!(stream, "{}", status)?; + } + stream.reset()?; + match message { + Some(message) => write!(stream, " {}\n", message)?, + None => write!(stream, " ")?, + } + } + ShellOut::Write(ref mut w) => { + if justified { + write!(w, "{:>12}", status)?; + } else { + write!(w, "{}", status)?; + } + match message { + Some(message) => write!(w, " {}\n", message)?, + None => write!(w, " ")?, + } + } + } + Ok(()) + } + + /// Get this object as a `io::Write`. + fn as_write(&mut self) -> &mut Write { + match *self { + ShellOut::Stream { ref mut stream, .. 
} => stream,
+ ShellOut::Write(ref mut w) => w,
+ }
+ }
+}
+
+impl ColorChoice {
+ /// Convert our color choice to termcolor's version
+ fn to_termcolor_color_choice(&self) -> termcolor::ColorChoice {
+ match *self {
+ ColorChoice::Always => termcolor::ColorChoice::Always,
+ ColorChoice::Never => termcolor::ColorChoice::Never,
+ ColorChoice::CargoAuto => {
+ if atty::is(atty::Stream::Stderr) {
+ termcolor::ColorChoice::Auto
+ } else {
+ termcolor::ColorChoice::Never
+ }
+ }
+ }
+ }
+}
+
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+mod imp {
+ use std::mem;
+
+ use libc;
+
+ pub fn stderr_width() -> Option<usize> {
+ unsafe {
+ let mut winsize: libc::winsize = mem::zeroed();
+ if libc::ioctl(libc::STDERR_FILENO, libc::TIOCGWINSZ, &mut winsize) < 0 {
+ return None;
+ }
+ if winsize.ws_col > 0 {
+ Some(winsize.ws_col as usize)
+ } else {
+ None
+ }
+ }
+ }
+}
+
+#[cfg(all(unix, not(any(target_os = "linux", target_os = "macos"))))]
+mod imp {
+ pub fn stderr_width() -> Option<usize> {
+ None
+ }
+}
+
+#[cfg(windows)]
+mod imp {
+ extern crate winapi;
+
+ use std::mem;
+ use self::winapi::um::processenv::*;
+ use self::winapi::um::winbase::*;
+ use self::winapi::um::wincon::*;
+
+ pub fn stderr_width() -> Option<usize> {
+ unsafe {
+ let stdout = GetStdHandle(STD_ERROR_HANDLE);
+ let mut csbi: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
+ if GetConsoleScreenBufferInfo(stdout, &mut csbi) == 0 {
+ return None;
+ }
+ Some((csbi.srWindow.Right - csbi.srWindow.Left) as usize)
+ }
+ }
+}
diff --git a/src/cargo/core/source/mod.rs b/src/cargo/core/source/mod.rs
new file mode 100644
index 000000000..c36480aab
--- /dev/null
+++ b/src/cargo/core/source/mod.rs
@@ -0,0 +1,201 @@
+use std::collections::hash_map::{HashMap, IterMut, Values};
+use std::fmt;
+
+use core::{Dependency, Package, PackageId, Summary};
+use util::CargoResult;
+
+mod source_id;
+
+pub use self::source_id::{GitReference, SourceId};
+
+/// A Source finds and downloads remote packages based on names and
+/// versions.
+pub trait Source {
+ /// Returns the `SourceId` corresponding to this source
+ fn source_id(&self) -> &SourceId;
+
+ /// Returns whether or not this source will return summaries with
+ /// checksums listed.
+ fn supports_checksums(&self) -> bool;
+
+ /// Returns whether or not this source will return summaries with
+ /// the `precise` field in the source id listed.
+ fn requires_precise(&self) -> bool;
+
+ /// Attempt to find the packages that match a dependency request.
+ fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()>;
+
+ fn query_vec(&mut self, dep: &Dependency) -> CargoResult<Vec<Summary>> {
+ let mut ret = Vec::new();
+ self.query(dep, &mut |s| ret.push(s))?;
+ Ok(ret)
+ }
+
+ /// The update method performs any network operations required to
+ /// get the entire list of all names, versions and dependencies of
+ /// packages managed by the Source.
+ fn update(&mut self) -> CargoResult<()>;
+
+ /// The download method fetches the full package for each name and
+ /// version specified.
+ fn download(&mut self, package: &PackageId) -> CargoResult<Package>;
+
+ /// Generates a unique string which represents the fingerprint of the
+ /// current state of the source.
+ ///
+ /// This fingerprint is used to determine the "freshness" of the source
+ /// later on. It must be guaranteed that the fingerprint of a source is
+ /// constant if and only if the output product will remain constant.
+ /// + /// The `pkg` argument is the package which this fingerprint should only be + /// interested in for when this source may contain multiple packages. + fn fingerprint(&self, pkg: &Package) -> CargoResult; + + /// If this source supports it, verifies the source of the package + /// specified. + /// + /// Note that the source may also have performed other checksum-based + /// verification during the `download` step, but this is intended to be run + /// just before a crate is compiled so it may perform more expensive checks + /// which may not be cacheable. + fn verify(&self, _pkg: &PackageId) -> CargoResult<()> { + Ok(()) + } +} + +impl<'a, T: Source + ?Sized + 'a> Source for Box { + /// Forwards to `Source::supports_checksums` + fn supports_checksums(&self) -> bool { + (**self).supports_checksums() + } + + /// Forwards to `Source::requires_precise` + fn requires_precise(&self) -> bool { + (**self).requires_precise() + } + + /// Forwards to `Source::query` + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + (**self).query(dep, f) + } + + /// Forwards to `Source::source_id` + fn source_id(&self) -> &SourceId { + (**self).source_id() + } + + /// Forwards to `Source::update` + fn update(&mut self) -> CargoResult<()> { + (**self).update() + } + + /// Forwards to `Source::download` + fn download(&mut self, id: &PackageId) -> CargoResult { + (**self).download(id) + } + + /// Forwards to `Source::fingerprint` + fn fingerprint(&self, pkg: &Package) -> CargoResult { + (**self).fingerprint(pkg) + } + + /// Forwards to `Source::verify` + fn verify(&self, pkg: &PackageId) -> CargoResult<()> { + (**self).verify(pkg) + } +} + +/// A `HashMap` of `SourceId` -> `Box` +#[derive(Default)] +pub struct SourceMap<'src> { + map: HashMap>, +} + +// impl debug on source requires specialization, if even desirable at all +impl<'src> fmt::Debug for SourceMap<'src> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "SourceMap ")?; + f.debug_set().entries(self.map.keys()).finish() + } +} + +/// A `std::collection::hash_map::Values` for `SourceMap` +pub type Sources<'a, 'src> = Values<'a, SourceId, Box>; + +/// A `std::collection::hash_map::IterMut` for `SourceMap` +pub struct SourcesMut<'a, 'src: 'a> { + inner: IterMut<'a, SourceId, Box>, +} + +impl<'src> SourceMap<'src> { + /// Create an empty map + pub fn new() -> SourceMap<'src> { + SourceMap { + map: HashMap::new(), + } + } + + /// Like `HashMap::contains_key` + pub fn contains(&self, id: &SourceId) -> bool { + self.map.contains_key(id) + } + + /// Like `HashMap::get` + pub fn get(&self, id: &SourceId) -> Option<&(Source + 'src)> { + let source = self.map.get(id); + + source.map(|s| { + let s: &(Source + 'src) = &**s; + s + }) + } + + /// Like `HashMap::get_mut` + pub fn get_mut(&mut self, id: &SourceId) -> Option<&mut (Source + 'src)> { + self.map.get_mut(id).map(|s| { + let s: &mut (Source + 'src) = &mut **s; + s + }) + } + + /// Like `HashMap::get`, but first calculates the `SourceId` from a + /// `PackageId` + pub fn get_by_package_id(&self, pkg_id: &PackageId) -> Option<&(Source + 'src)> { + self.get(pkg_id.source_id()) + } + + /// Like `HashMap::insert`, but derives the SourceId key from the Source + pub fn insert(&mut self, source: Box) { + let id = source.source_id().clone(); + self.map.insert(id, source); + } + + /// Like `HashMap::is_empty` + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Like `HashMap::len` + pub fn len(&self) -> usize { + self.map.len() + } + + /// Like 
`HashMap::values` + pub fn sources<'a>(&'a self) -> Sources<'a, 'src> { + self.map.values() + } + + /// Like `HashMap::iter_mut` + pub fn sources_mut<'a>(&'a mut self) -> SourcesMut<'a, 'src> { + SourcesMut { + inner: self.map.iter_mut(), + } + } +} + +impl<'a, 'src> Iterator for SourcesMut<'a, 'src> { + type Item = (&'a SourceId, &'a mut (Source + 'src)); + fn next(&mut self) -> Option<(&'a SourceId, &'a mut (Source + 'src))> { + self.inner.next().map(|(a, b)| (a, &mut **b)) + } +} diff --git a/src/cargo/core/source/source_id.rs b/src/cargo/core/source/source_id.rs new file mode 100644 index 000000000..174a36531 --- /dev/null +++ b/src/cargo/core/source/source_id.rs @@ -0,0 +1,569 @@ +use std::cmp::{self, Ordering}; +use std::fmt::{self, Formatter}; +use std::hash::{self, Hash}; +use std::path::Path; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT}; +use std::sync::atomic::Ordering::SeqCst; + +use serde::ser; +use serde::de; +use url::Url; + +use ops; +use sources::git; +use sources::{GitSource, PathSource, RegistrySource, CRATES_IO}; +use sources::DirectorySource; +use util::{CargoResult, Config, ToUrl}; + +/// Unique identifier for a source of packages. +#[derive(Clone, Eq, Debug)] +pub struct SourceId { + inner: Arc, +} + +#[derive(Eq, Clone, Debug)] +struct SourceIdInner { + /// The source URL + url: Url, + /// `git::canonicalize_url(url)` for the url field + canonical_url: Url, + /// The source kind + kind: Kind, + // e.g. the exact git revision of the specified branch for a Git Source + precise: Option, + /// Name of the registry source for alternative registries + name: Option, +} + +/// The possible kinds of code source. Along with SourceIdInner this fully defines the +/// source +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +enum Kind { + /// Kind::Git() represents a git repository + Git(GitReference), + /// represents a local path + Path, + /// represents a remote registry + Registry, + /// represents a local filesystem-based registry + LocalRegistry, + /// represents a directory-based registry + Directory, +} + +/// Information to find a specific commit in a git repository +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum GitReference { + /// from a tag + Tag(String), + /// from the HEAD of a branch + Branch(String), + /// from a specific revision + Rev(String), +} + +impl SourceId { + /// Create a SourceId object from the kind and url. + /// + /// The canonical url will be calculated, but the precise field will not + fn new(kind: Kind, url: Url) -> CargoResult { + let source_id = SourceId { + inner: Arc::new(SourceIdInner { + kind, + canonical_url: git::canonicalize_url(&url)?, + url, + precise: None, + name: None, + }), + }; + Ok(source_id) + } + + /// Parses a source URL and returns the corresponding ID. + /// + /// ## Example + /// + /// ``` + /// use cargo::core::SourceId; + /// SourceId::from_url("git+https://github.com/alexcrichton/\ + /// libssh2-static-sys#80e71a3021618eb05\ + /// 656c58fb7c5ef5f12bc747f"); + /// ``` + pub fn from_url(string: &str) -> CargoResult { + let mut parts = string.splitn(2, '+'); + let kind = parts.next().unwrap(); + let url = parts + .next() + .ok_or_else(|| format_err!("invalid source `{}`", string))?; + + match kind { + "git" => { + let mut url = url.to_url()?; + let mut reference = GitReference::Branch("master".to_string()); + for (k, v) in url.query_pairs() { + match &k[..] 
{ + // map older 'ref' to branch + "branch" | "ref" => reference = GitReference::Branch(v.into_owned()), + + "rev" => reference = GitReference::Rev(v.into_owned()), + "tag" => reference = GitReference::Tag(v.into_owned()), + _ => {} + } + } + let precise = url.fragment().map(|s| s.to_owned()); + url.set_fragment(None); + url.set_query(None); + Ok(SourceId::for_git(&url, reference)?.with_precise(precise)) + } + "registry" => { + let url = url.to_url()?; + Ok(SourceId::new(Kind::Registry, url)?.with_precise(Some("locked".to_string()))) + } + "path" => { + let url = url.to_url()?; + SourceId::new(Kind::Path, url) + } + kind => Err(format_err!("unsupported source protocol: {}", kind)), + } + } + + /// A view of the `SourceId` that can be `Display`ed as a URL + pub fn to_url(&self) -> SourceIdToUrl { + SourceIdToUrl { + inner: &*self.inner, + } + } + + /// Create a SourceId from a filesystem path. + /// + /// Pass absolute path + pub fn for_path(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::Path, url) + } + + /// Crate a SourceId from a git reference + pub fn for_git(url: &Url, reference: GitReference) -> CargoResult { + SourceId::new(Kind::Git(reference), url.clone()) + } + + /// Create a SourceId from a registry url + pub fn for_registry(url: &Url) -> CargoResult { + SourceId::new(Kind::Registry, url.clone()) + } + + /// Create a SourceId from a local registry path + pub fn for_local_registry(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::LocalRegistry, url) + } + + /// Create a SourceId from a directory path + pub fn for_directory(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::Directory, url) + } + + /// Returns the `SourceId` corresponding to the main repository. + /// + /// This is the main cargo registry by default, but it can be overridden in + /// a `.cargo/config`. + pub fn crates_io(config: &Config) -> CargoResult { + config.crates_io_source_id(|| { + let cfg = ops::registry_configuration(config, None)?; + let url = if let Some(ref index) = cfg.index { + static WARNED: AtomicBool = ATOMIC_BOOL_INIT; + if !WARNED.swap(true, SeqCst) { + config.shell().warn( + "custom registry support via \ + the `registry.index` configuration is \ + being removed, this functionality \ + will not work in the future", + )?; + } + &index[..] 
+ } else { + CRATES_IO + }; + let url = url.to_url()?; + SourceId::for_registry(&url) + }) + } + + pub fn alt_registry(config: &Config, key: &str) -> CargoResult { + let url = config.get_registry_index(key)?; + Ok(SourceId { + inner: Arc::new(SourceIdInner { + kind: Kind::Registry, + canonical_url: git::canonicalize_url(&url)?, + url, + precise: None, + name: Some(key.to_string()), + }), + }) + } + + /// Get this source URL + pub fn url(&self) -> &Url { + &self.inner.url + } + + pub fn display_registry(&self) -> String { + format!("registry `{}`", self.url()) + } + + /// Is this source from a filesystem path + pub fn is_path(&self) -> bool { + self.inner.kind == Kind::Path + } + + /// Is this source from a registry (either local or not) + pub fn is_registry(&self) -> bool { + match self.inner.kind { + Kind::Registry | Kind::LocalRegistry => true, + _ => false, + } + } + + /// Is this source from an alternative registry + pub fn is_alt_registry(&self) -> bool { + self.is_registry() && self.inner.name.is_some() + } + + /// Is this source from a git repository + pub fn is_git(&self) -> bool { + match self.inner.kind { + Kind::Git(_) => true, + _ => false, + } + } + + /// Creates an implementation of `Source` corresponding to this ID. + pub fn load<'a>(&self, config: &'a Config) -> CargoResult> { + trace!("loading SourceId; {}", self); + match self.inner.kind { + Kind::Git(..) => Ok(Box::new(GitSource::new(self, config)?)), + Kind::Path => { + let path = match self.inner.url.to_file_path() { + Ok(p) => p, + Err(()) => panic!("path sources cannot be remote"), + }; + Ok(Box::new(PathSource::new(&path, self, config))) + } + Kind::Registry => Ok(Box::new(RegistrySource::remote(self, config))), + Kind::LocalRegistry => { + let path = match self.inner.url.to_file_path() { + Ok(p) => p, + Err(()) => panic!("path sources cannot be remote"), + }; + Ok(Box::new(RegistrySource::local(self, &path, config))) + } + Kind::Directory => { + let path = match self.inner.url.to_file_path() { + Ok(p) => p, + Err(()) => panic!("path sources cannot be remote"), + }; + Ok(Box::new(DirectorySource::new(&path, self, config))) + } + } + } + + /// Get the value of the precise field + pub fn precise(&self) -> Option<&str> { + self.inner.precise.as_ref().map(|s| &s[..]) + } + + /// Get the git reference if this is a git source, otherwise None. + pub fn git_reference(&self) -> Option<&GitReference> { + match self.inner.kind { + Kind::Git(ref s) => Some(s), + _ => None, + } + } + + /// Create a new SourceId from this source with the given `precise` + pub fn with_precise(&self, v: Option) -> SourceId { + SourceId { + inner: Arc::new(SourceIdInner { + precise: v, + ..(*self.inner).clone() + }), + } + } + + /// Whether the remote registry is the standard https://crates.io + pub fn is_default_registry(&self) -> bool { + match self.inner.kind { + Kind::Registry => {} + _ => return false, + } + self.inner.url.to_string() == CRATES_IO + } + + /// Hash `self` + /// + /// For paths, remove the workspace prefix so the same source will give the + /// same hash in different locations. 
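+ // For example (illustrative): a path source at `/home/user/ws/crates/foo`
+ // with workspace root `/home/user/ws` hashes only the relative
+ // `crates/foo`, so the same workspace checked out elsewhere produces
+ // identical hashes (and hence identical Cargo directory names derived
+ // from them).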
+ pub fn stable_hash(&self, workspace: &Path, into: &mut S) { + if self.is_path() { + if let Ok(p) = self.inner + .url + .to_file_path() + .unwrap() + .strip_prefix(workspace) + { + self.inner.kind.hash(into); + p.to_str().unwrap().hash(into); + return; + } + } + self.hash(into) + } +} + +impl PartialEq for SourceId { + fn eq(&self, other: &SourceId) -> bool { + (*self.inner).eq(&*other.inner) + } +} + +impl PartialOrd for SourceId { + fn partial_cmp(&self, other: &SourceId) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceId { + fn cmp(&self, other: &SourceId) -> Ordering { + self.inner.cmp(&other.inner) + } +} + +impl ser::Serialize for SourceId { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + if self.is_path() { + None::.serialize(s) + } else { + s.collect_str(&self.to_url()) + } + } +} + +impl<'de> de::Deserialize<'de> for SourceId { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + SourceId::from_url(&string).map_err(de::Error::custom) + } +} + +impl fmt::Display for SourceId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match *self.inner { + SourceIdInner { + kind: Kind::Path, + ref url, + .. + } => fmt::Display::fmt(url, f), + SourceIdInner { + kind: Kind::Git(ref reference), + ref url, + ref precise, + .. + } => { + write!(f, "{}", url)?; + if let Some(pretty) = reference.pretty_ref() { + write!(f, "?{}", pretty)?; + } + + if let Some(ref s) = *precise { + let len = cmp::min(s.len(), 8); + write!(f, "#{}", &s[..len])?; + } + Ok(()) + } + SourceIdInner { + kind: Kind::Registry, + ref url, + .. + } + | SourceIdInner { + kind: Kind::LocalRegistry, + ref url, + .. + } => write!(f, "registry `{}`", url), + SourceIdInner { + kind: Kind::Directory, + ref url, + .. + } => write!(f, "dir {}", url), + } + } +} + +// This custom implementation handles situations such as when two git sources +// point at *almost* the same URL, but not quite, even when they actually point +// to the same repository. +/// This method tests for self and other values to be equal, and is used by ==. +/// +/// For git repositories, the canonical url is checked. +impl PartialEq for SourceIdInner { + fn eq(&self, other: &SourceIdInner) -> bool { + if self.kind != other.kind { + return false; + } + if self.url == other.url { + return true; + } + + match (&self.kind, &other.kind) { + (&Kind::Git(ref ref1), &Kind::Git(ref ref2)) => { + ref1 == ref2 && self.canonical_url == other.canonical_url + } + _ => false, + } + } +} + +impl PartialOrd for SourceIdInner { + fn partial_cmp(&self, other: &SourceIdInner) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceIdInner { + fn cmp(&self, other: &SourceIdInner) -> Ordering { + match self.kind.cmp(&other.kind) { + Ordering::Equal => {} + ord => return ord, + } + match self.url.cmp(&other.url) { + Ordering::Equal => {} + ord => return ord, + } + match (&self.kind, &other.kind) { + (&Kind::Git(ref ref1), &Kind::Git(ref ref2)) => { + (ref1, &self.canonical_url).cmp(&(ref2, &other.canonical_url)) + } + _ => self.kind.cmp(&other.kind), + } + } +} + +// The hash of SourceId is used in the name of some Cargo folders, so shouldn't +// vary. `as_str` gives the serialisation of a url (which has a spec) and so +// insulates against possible changes in how the url crate does hashing. +impl Hash for SourceId { + fn hash(&self, into: &mut S) { + self.inner.kind.hash(into); + match *self.inner { + SourceIdInner { + kind: Kind::Git(..), + ref canonical_url, + .. 
+ } => canonical_url.as_str().hash(into), + _ => self.inner.url.as_str().hash(into), + } + } +} + +/// A `Display`able view into a `SourceId` that will write it as a url +pub struct SourceIdToUrl<'a> { + inner: &'a SourceIdInner, +} + +impl<'a> fmt::Display for SourceIdToUrl<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self.inner { + SourceIdInner { + kind: Kind::Path, + ref url, + .. + } => write!(f, "path+{}", url), + SourceIdInner { + kind: Kind::Git(ref reference), + ref url, + ref precise, + .. + } => { + write!(f, "git+{}", url)?; + if let Some(pretty) = reference.pretty_ref() { + write!(f, "?{}", pretty)?; + } + if let Some(precise) = precise.as_ref() { + write!(f, "#{}", precise)?; + } + Ok(()) + } + SourceIdInner { + kind: Kind::Registry, + ref url, + .. + } => write!(f, "registry+{}", url), + SourceIdInner { + kind: Kind::LocalRegistry, + ref url, + .. + } => write!(f, "local-registry+{}", url), + SourceIdInner { + kind: Kind::Directory, + ref url, + .. + } => write!(f, "directory+{}", url), + } + } +} + +impl GitReference { + /// Returns a `Display`able view of this git reference, or None if using + /// the head of the "master" branch + pub fn pretty_ref(&self) -> Option { + match *self { + GitReference::Branch(ref s) if *s == "master" => None, + _ => Some(PrettyRef { inner: self }), + } + } +} + +/// A git reference that can be `Display`ed +pub struct PrettyRef<'a> { + inner: &'a GitReference, +} + +impl<'a> fmt::Display for PrettyRef<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self.inner { + GitReference::Branch(ref b) => write!(f, "branch={}", b), + GitReference::Tag(ref s) => write!(f, "tag={}", s), + GitReference::Rev(ref s) => write!(f, "rev={}", s), + } + } +} + +#[cfg(test)] +mod tests { + use super::{GitReference, Kind, SourceId}; + use util::ToUrl; + + #[test] + fn github_sources_equal() { + let loc = "https://github.com/foo/bar".to_url().unwrap(); + let master = Kind::Git(GitReference::Branch("master".to_string())); + let s1 = SourceId::new(master.clone(), loc).unwrap(); + + let loc = "git://github.com/foo/bar".to_url().unwrap(); + let s2 = SourceId::new(master, loc.clone()).unwrap(); + + assert_eq!(s1, s2); + + let foo = Kind::Git(GitReference::Branch("foo".to_string())); + let s3 = SourceId::new(foo, loc).unwrap(); + assert_ne!(s1, s3); + } +} diff --git a/src/cargo/core/summary.rs b/src/cargo/core/summary.rs new file mode 100644 index 000000000..b7c36d8dd --- /dev/null +++ b/src/cargo/core/summary.rs @@ -0,0 +1,394 @@ +use std::collections::{BTreeMap, HashMap}; +use std::mem; +use std::rc::Rc; + +use serde::{Serialize, Serializer}; + +use core::interning::InternedString; +use core::{Dependency, PackageId, SourceId}; +use semver::Version; + +use util::CargoResult; + +/// Subset of a `Manifest`. Contains only the most important information about +/// a package. 
+/// +/// Summaries are cloned, and should not be mutated after creation +#[derive(Debug, Clone)] +pub struct Summary { + inner: Rc, +} + +#[derive(Debug, Clone)] +struct Inner { + package_id: PackageId, + dependencies: Vec, + features: FeatureMap, + checksum: Option, + links: Option, + namespaced_features: bool, +} + +impl Summary { + pub fn new( + pkg_id: PackageId, + dependencies: Vec, + features: BTreeMap>, + links: Option, + namespaced_features: bool, + ) -> CargoResult { + for dep in dependencies.iter() { + if !namespaced_features && features.get(&*dep.name()).is_some() { + bail!( + "Features and dependencies cannot have the \ + same name: `{}`", + dep.name() + ) + } + if dep.is_optional() && !dep.is_transitive() { + bail!( + "Dev-dependencies are not allowed to be optional: `{}`", + dep.name() + ) + } + } + let feature_map = build_feature_map(features, &dependencies, namespaced_features)?; + Ok(Summary { + inner: Rc::new(Inner { + package_id: pkg_id, + dependencies, + features: feature_map, + checksum: None, + links: links.map(|l| InternedString::new(&l)), + namespaced_features, + }), + }) + } + + pub fn package_id(&self) -> &PackageId { + &self.inner.package_id + } + pub fn name(&self) -> InternedString { + self.package_id().name() + } + pub fn version(&self) -> &Version { + self.package_id().version() + } + pub fn source_id(&self) -> &SourceId { + self.package_id().source_id() + } + pub fn dependencies(&self) -> &[Dependency] { + &self.inner.dependencies + } + pub fn features(&self) -> &FeatureMap { + &self.inner.features + } + pub fn checksum(&self) -> Option<&str> { + self.inner.checksum.as_ref().map(|s| &s[..]) + } + pub fn links(&self) -> Option { + self.inner.links + } + pub fn namespaced_features(&self) -> bool { + self.inner.namespaced_features + } + + pub fn override_id(mut self, id: PackageId) -> Summary { + Rc::make_mut(&mut self.inner).package_id = id; + self + } + + pub fn set_checksum(mut self, cksum: String) -> Summary { + Rc::make_mut(&mut self.inner).checksum = Some(cksum); + self + } + + pub fn map_dependencies(mut self, f: F) -> Summary + where + F: FnMut(Dependency) -> Dependency, + { + { + let slot = &mut Rc::make_mut(&mut self.inner).dependencies; + let deps = mem::replace(slot, Vec::new()); + *slot = deps.into_iter().map(f).collect(); + } + self + } + + pub fn map_source(self, to_replace: &SourceId, replace_with: &SourceId) -> Summary { + let me = if self.package_id().source_id() == to_replace { + let new_id = self.package_id().with_source_id(replace_with); + self.override_id(new_id) + } else { + self + }; + me.map_dependencies(|dep| dep.map_source(to_replace, replace_with)) + } +} + +impl PartialEq for Summary { + fn eq(&self, other: &Summary) -> bool { + self.inner.package_id == other.inner.package_id + } +} + +// Checks features for errors, bailing out a CargoResult:Err if invalid, +// and creates FeatureValues for each feature. +fn build_feature_map( + features: BTreeMap>, + dependencies: &[Dependency], + namespaced: bool, +) -> CargoResult { + use self::FeatureValue::*; + let mut dep_map = HashMap::new(); + for dep in dependencies.iter() { + dep_map.entry(dep.name().as_str()) + .or_insert(Vec::new()) + .push(dep); + } + + let mut map = BTreeMap::new(); + for (feature, list) in features.iter() { + // If namespaced features is active and the key is the same as that of an + // optional dependency, that dependency must be included in the values. 
+ // Thus, if a `feature` is found that has the same name as a dependency, we + // (a) bail out if the dependency is non-optional, and (b) we track if the + // feature requirements include the dependency `crate:feature` in the list. + // This is done with the `dependency_found` variable, which can only be + // false if features are namespaced and the current feature key is the same + // as the name of an optional dependency. If so, it gets set to true during + // iteration over the list if the dependency is found in the list. + let mut dependency_found = if namespaced { + match dep_map.get(feature.as_str()) { + Some(ref dep_data) => { + if !dep_data.iter().any(|d| d.is_optional()) { + bail!( + "Feature `{}` includes the dependency of the same name, but this is \ + left implicit in the features included by this feature.\n\ + Additionally, the dependency must be marked as optional to be \ + included in the feature definition.\n\ + Consider adding `crate:{}` to this feature's requirements \ + and marking the dependency as `optional = true`", + feature, + feature + ) + } else { + false + } + } + None => true, + } + } else { + true + }; + + let mut values = vec![]; + for dep in list { + let val = FeatureValue::build( + InternedString::new(dep), + |fs| features.contains_key(fs), + namespaced, + ); + + // Find data for the referenced dependency... + let dep_data = { + match val { + Feature(ref dep_name) | Crate(ref dep_name) | CrateFeature(ref dep_name, _) => { + dep_map.get(dep_name.as_str()) + } + } + }; + let is_optional_dep = dep_data.iter() + .flat_map(|d| d.iter()) + .any(|d| d.is_optional()); + if let FeatureValue::Crate(ref dep_name) = val { + // If we have a dependency value, check if this is the dependency named + // the same as the feature that we were looking for. + if !dependency_found && feature == dep_name.as_str() { + dependency_found = true; + } + } + + match (&val, dep_data.is_some(), is_optional_dep) { + // The value is a feature. If features are namespaced, this just means + // it's not prefixed with `crate:`, so we have to check whether the + // feature actually exist. If the feature is not defined *and* an optional + // dependency of the same name exists, the feature is defined implicitly + // here by adding it to the feature map, pointing to the dependency. + // If features are not namespaced, it's been validated as a feature already + // while instantiating the `FeatureValue` in `FeatureValue::build()`, so + // we don't have to do so here. + (&Feature(feat), _, true) => { + if namespaced && !features.contains_key(&*feat) { + map.insert(feat.to_string(), vec![FeatureValue::Crate(feat)]); + } + } + // If features are namespaced and the value is not defined as a feature + // and there is no optional dependency of the same name, error out. + // If features are not namespaced, there must be an existing feature + // here (checked by `FeatureValue::build()`), so it will always be defined. + (&Feature(feat), dep_exists, false) => { + if namespaced && !features.contains_key(&*feat) { + if dep_exists { + bail!( + "Feature `{}` includes `{}` which is not defined as a feature.\n\ + A non-optional dependency of the same name is defined; consider \ + adding `optional = true` to its definition", + feature, + feat + ) + } else { + bail!( + "Feature `{}` includes `{}` which is not defined as a feature", + feature, + feat + ) + } + } + } + // The value is a dependency. If features are namespaced, it is explicitly + // tagged as such (`crate:value`). 
If features are not namespaced, any value
+            // not recognized as a feature is pegged as a `Crate`. Here we handle the case
+            // where the dependency exists but is non-optional. It branches on namespaced
+            // just to provide the correct string for the crate dependency in the error.
+            (&Crate(ref dep), true, false) => if namespaced {
+                bail!(
+                    "Feature `{}` includes `crate:{}` which is not an \
+                     optional dependency.\nConsider adding \
+                     `optional = true` to the dependency",
+                    feature,
+                    dep
+                )
+            } else {
+                bail!(
+                    "Feature `{}` depends on `{}` which is not an \
+                     optional dependency.\nConsider adding \
+                     `optional = true` to the dependency",
+                    feature,
+                    dep
+                )
+            },
+            // If namespaced, the value was tagged as a dependency; if not namespaced,
+            // this could be anything not defined as a feature. This handles the case
+            // where no such dependency is actually defined; again, the branch on
+            // namespaced here is just to provide the correct string in the error.
+            (&Crate(ref dep), false, _) => if namespaced {
+                bail!(
+                    "Feature `{}` includes `crate:{}` which is not a known \
+                     dependency",
+                    feature,
+                    dep
+                )
+            } else {
+                bail!(
+                    "Feature `{}` includes `{}` which is neither a dependency nor \
+                     another feature",
+                    feature,
+                    dep
+                )
+            },
+            (&Crate(_), true, true) => {}
+            // If the value is a feature for one of the dependencies, bail out if no such
+            // dependency is actually defined in the manifest.
+            (&CrateFeature(ref dep, _), false, _) => bail!(
+                "Feature `{}` requires a feature of `{}` which is not a \
+                 dependency",
+                feature,
+                dep
+            ),
+            (&CrateFeature(_, _), true, _) => {}
+        }
+        values.push(val);
+    }
+
+    if !dependency_found {
+        // If we have not found the dependency of the same-named feature, we should
+        // bail here.
+        bail!(
+            "Feature `{}` includes the optional dependency of the \
+             same name, but this is left implicit in the features \
+             included by this feature.\nConsider adding \
+             `crate:{}` to this feature's requirements.",
+            feature,
+            feature
+        )
+    }
+
+    map.insert(feature.clone(), values);
+    }
+    Ok(map)
+}
+
+/// FeatureValue represents the types of dependencies a feature can have:
+///
+/// * Another feature
+/// * An optional dependency
+/// * A feature in a dependency
+///
+/// The selection between these 3 things happens as part of the construction of the FeatureValue.
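To make the three-way classification concrete before the enum itself, here is a minimal standalone sketch of the parsing rules described above. It is not cargo's code: plain `String`s stand in for the interned strings the real implementation uses, and `classify` mirrors the private `FeatureValue::build` logic.

```rust
// Illustrative re-implementation of the feature-value classification.
#[derive(Debug, PartialEq)]
enum Value {
    Feature(String),
    Crate(String),
    CrateFeature(String, String),
}

fn classify(s: &str, is_feature: impl Fn(&str) -> bool, namespaced: bool) -> Value {
    match (s.find('/'), namespaced) {
        // `dep/feat` always means "feature `feat` of dependency `dep`".
        (Some(pos), _) => {
            let (dep, feat) = s.split_at(pos);
            Value::CrateFeature(dep.to_string(), feat[1..].to_string())
        }
        // With namespaced features, dependencies must be written `crate:dep`.
        (None, true) if s.starts_with("crate:") => Value::Crate(s[6..].to_string()),
        (None, true) => Value::Feature(s.to_string()),
        // Without namespacing, anything that is not a known feature is
        // assumed to name a dependency.
        (None, false) if is_feature(s) => Value::Feature(s.to_string()),
        (None, false) => Value::Crate(s.to_string()),
    }
}

fn main() {
    let known = |f: &str| f == "fancy"; // hypothetical feature table probe
    assert_eq!(classify("fancy", known, false), Value::Feature("fancy".into()));
    assert_eq!(classify("serde", known, false), Value::Crate("serde".into()));
    assert_eq!(
        classify("serde/derive", known, false),
        Value::CrateFeature("serde".into(), "derive".into())
    );
    assert_eq!(classify("crate:serde", known, true), Value::Crate("serde".into()));
}
```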
+#[derive(Clone, Debug)]
+pub enum FeatureValue {
+    Feature(InternedString),
+    Crate(InternedString),
+    CrateFeature(InternedString, InternedString),
+}
+
+impl FeatureValue {
+    fn build<T>(feature: InternedString, is_feature: T, namespaced: bool) -> FeatureValue
+    where
+        T: Fn(&str) -> bool,
+    {
+        match (feature.find('/'), namespaced) {
+            (Some(pos), _) => {
+                let (dep, dep_feat) = feature.split_at(pos);
+                let dep_feat = &dep_feat[1..];
+                FeatureValue::CrateFeature(InternedString::new(dep), InternedString::new(dep_feat))
+            }
+            (None, true) if feature.starts_with("crate:") => {
+                FeatureValue::Crate(InternedString::new(&feature[6..]))
+            }
+            (None, true) => FeatureValue::Feature(feature),
+            (None, false) if is_feature(&feature) => FeatureValue::Feature(feature),
+            (None, false) => FeatureValue::Crate(feature),
+        }
+    }
+
+    pub fn new(feature: InternedString, s: &Summary) -> FeatureValue {
+        Self::build(
+            feature,
+            |fs| s.features().contains_key(fs),
+            s.namespaced_features(),
+        )
+    }
+
+    pub fn to_string(&self, s: &Summary) -> String {
+        use self::FeatureValue::*;
+        match *self {
+            Feature(ref f) => f.to_string(),
+            Crate(ref c) => if s.namespaced_features() {
+                format!("crate:{}", &c)
+            } else {
+                c.to_string()
+            },
+            CrateFeature(ref c, ref f) => [c.as_ref(), f.as_ref()].join("/"),
+        }
+    }
+}
+
+impl Serialize for FeatureValue {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        use self::FeatureValue::*;
+        match *self {
+            Feature(ref f) => serializer.serialize_str(f),
+            Crate(ref c) => serializer.serialize_str(c),
+            CrateFeature(ref c, ref f) => {
+                serializer.serialize_str(&[c.as_ref(), f.as_ref()].join("/"))
+            }
+        }
+    }
+}
+
+pub type FeatureMap = BTreeMap<String, Vec<FeatureValue>>;
diff --git a/src/cargo/core/workspace.rs b/src/cargo/core/workspace.rs
new file mode 100644
index 000000000..5e15b2e20
--- /dev/null
+++ b/src/cargo/core/workspace.rs
@@ -0,0 +1,855 @@
+use std::cell::RefCell;
+use std::collections::hash_map::{Entry, HashMap};
+use std::collections::BTreeMap;
+use std::path::{Path, PathBuf};
+use std::slice;
+
+use glob::glob;
+use url::Url;
+
+use core::profiles::Profiles;
+use core::registry::PackageRegistry;
+use core::{Dependency, PackageIdSpec};
+use core::{EitherManifest, Package, SourceId, VirtualManifest};
+use ops;
+use sources::PathSource;
+use util::errors::{CargoResult, CargoResultExt};
+use util::paths;
+use util::toml::read_manifest;
+use util::{Config, Filesystem};
+
+/// The core abstraction in Cargo for working with a workspace of crates.
+///
+/// A workspace is often created very early on and then threaded through all
+/// other functions. It's typically through this object that the current
+/// package is loaded and/or learned about.
+#[derive(Debug)]
+pub struct Workspace<'cfg> {
+    config: &'cfg Config,
+
+    // This path is a path to where the current cargo subcommand was invoked
+    // from. That is, this is the `--manifest-path` argument to Cargo, and
+    // points to the "main crate" that we're going to worry about.
+    current_manifest: PathBuf,
+
+    // A list of packages found in this workspace. Always includes at least the
+    // package mentioned by `current_manifest`.
+    packages: Packages<'cfg>,
+
+    // If this workspace includes more than one crate, this points to the root
+    // of the workspace. This is `None` in the case that `[workspace]` is
+    // missing, `package.workspace` is missing, and no `Cargo.toml` above
+    // `current_manifest` was found on the filesystem with `[workspace]`.
+ root_manifest: Option, + + // Shared target directory for all the packages of this workspace. + // `None` if the default path of `root/target` should be used. + target_dir: Option, + + // List of members in this workspace with a listing of all their manifest + // paths. The packages themselves can be looked up through the `packages` + // set above. + members: Vec, + + // The subset of `members` that are used by the + // `build`, `check`, `test`, and `bench` subcommands + // when no package is selected with `--package` / `-p` and `--all` + // is not used. + // + // This is set by the `default-members` config + // in the `[workspace]` section. + // When unset, this is the same as `members` for virtual workspaces + // (`--all` is implied) + // or only the root package for non-virtual workspaces. + default_members: Vec, + + // True, if this is a temporary workspace created for the purposes of + // cargo install or cargo package. + is_ephemeral: bool, + + // True if this workspace should enforce optional dependencies even when + // not needed; false if this workspace should only enforce dependencies + // needed by the current configuration (such as in cargo install). In some + // cases `false` also results in the non-enforcement of dev-dependencies. + require_optional_deps: bool, + + // A cache of loaded packages for particular paths which is disjoint from + // `packages` up above, used in the `load` method down below. + loaded_packages: RefCell>, +} + +// Separate structure for tracking loaded packages (to avoid loading anything +// twice), and this is separate to help appease the borrow checker. +#[derive(Debug)] +struct Packages<'cfg> { + config: &'cfg Config, + packages: HashMap, +} + +#[derive(Debug)] +enum MaybePackage { + Package(Package), + Virtual(VirtualManifest), +} + +/// Configuration of a workspace in a manifest. +#[derive(Debug, Clone)] +pub enum WorkspaceConfig { + /// Indicates that `[workspace]` was present and the members were + /// optionally specified as well. + Root(WorkspaceRootConfig), + + /// Indicates that `[workspace]` was present and the `root` field is the + /// optional value of `package.workspace`, if present. + Member { root: Option }, +} + +/// Intermediate configuration of a workspace root in a manifest. +/// +/// Knows the Workspace Root path, as well as `members` and `exclude` lists of path patterns, which +/// together tell if some path is recognized as a member by this root or not. +#[derive(Debug, Clone)] +pub struct WorkspaceRootConfig { + root_dir: PathBuf, + members: Option>, + default_members: Option>, + exclude: Vec, +} + +/// An iterator over the member packages of a workspace, returned by +/// `Workspace::members` +pub struct Members<'a, 'cfg: 'a> { + ws: &'a Workspace<'cfg>, + iter: slice::Iter<'a, PathBuf>, +} + +impl<'cfg> Workspace<'cfg> { + /// Creates a new workspace given the target manifest pointed to by + /// `manifest_path`. + /// + /// This function will construct the entire workspace by determining the + /// root and all member packages. It will then validate the workspace + /// before returning it, so `Ok` is only returned for valid workspaces. 
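A minimal usage sketch for this constructor, under the assumptions that a default `Config` is available and that `manifest` is an absolute path to some `Cargo.toml`; error handling and CLI plumbing are elided, and `describe` is an invented helper, not part of cargo.

```rust
use std::path::Path;

use cargo::core::Workspace;
use cargo::util::Config;
use cargo::CargoResult;

// Construct a workspace from a manifest path and print what was discovered.
fn describe(manifest: &Path) -> CargoResult<()> {
    let config = Config::default()?; // cwd, $CARGO_HOME, shell, ...
    let ws = Workspace::new(manifest, &config)?; // validates before returning Ok
    println!("workspace root: {}", ws.root().display());
    for pkg in ws.members() {
        println!("member: {}", pkg.name().as_str());
    }
    Ok(())
}
```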
+ pub fn new(manifest_path: &Path, config: &'cfg Config) -> CargoResult> { + let target_dir = config.target_dir()?; + + let mut ws = Workspace { + config, + current_manifest: manifest_path.to_path_buf(), + packages: Packages { + config, + packages: HashMap::new(), + }, + root_manifest: None, + target_dir, + members: Vec::new(), + default_members: Vec::new(), + is_ephemeral: false, + require_optional_deps: true, + loaded_packages: RefCell::new(HashMap::new()), + }; + ws.root_manifest = ws.find_root(manifest_path)?; + ws.find_members()?; + ws.validate()?; + Ok(ws) + } + + /// Creates a "temporary workspace" from one package which only contains + /// that package. + /// + /// This constructor will not touch the filesystem and only creates an + /// in-memory workspace. That is, all configuration is ignored, it's just + /// intended for that one package. + /// + /// This is currently only used in niche situations like `cargo install` or + /// `cargo package`. + pub fn ephemeral( + package: Package, + config: &'cfg Config, + target_dir: Option, + require_optional_deps: bool, + ) -> CargoResult> { + let mut ws = Workspace { + config, + current_manifest: package.manifest_path().to_path_buf(), + packages: Packages { + config, + packages: HashMap::new(), + }, + root_manifest: None, + target_dir: None, + members: Vec::new(), + default_members: Vec::new(), + is_ephemeral: true, + require_optional_deps, + loaded_packages: RefCell::new(HashMap::new()), + }; + { + let key = ws.current_manifest.parent().unwrap(); + let package = MaybePackage::Package(package); + ws.packages.packages.insert(key.to_path_buf(), package); + ws.target_dir = if let Some(dir) = target_dir { + Some(dir) + } else { + ws.config.target_dir()? + }; + ws.members.push(ws.current_manifest.clone()); + ws.default_members.push(ws.current_manifest.clone()); + } + Ok(ws) + } + + /// Returns the current package of this workspace. + /// + /// Note that this can return an error if it the current manifest is + /// actually a "virtual Cargo.toml", in which case an error is returned + /// indicating that something else should be passed. + pub fn current(&self) -> CargoResult<&Package> { + let pkg = self.current_opt().ok_or_else(|| { + format_err!( + "manifest path `{}` is a virtual manifest, but this \ + command requires running against an actual package in \ + this workspace", + self.current_manifest.display() + ) + })?; + Ok(pkg) + } + + pub fn current_opt(&self) -> Option<&Package> { + match *self.packages.get(&self.current_manifest) { + MaybePackage::Package(ref p) => Some(p), + MaybePackage::Virtual(..) => None, + } + } + + pub fn is_virtual(&self) -> bool { + match *self.packages.get(&self.current_manifest) { + MaybePackage::Package(..) => false, + MaybePackage::Virtual(..) => true, + } + } + + /// Returns the `Config` this workspace is associated with. + pub fn config(&self) -> &'cfg Config { + self.config + } + + pub fn profiles(&self) -> &Profiles { + let root = self.root_manifest + .as_ref() + .unwrap_or(&self.current_manifest); + match *self.packages.get(root) { + MaybePackage::Package(ref p) => p.manifest().profiles(), + MaybePackage::Virtual(ref vm) => vm.profiles(), + } + } + + /// Returns the root path of this workspace. + /// + /// That is, this returns the path of the directory containing the + /// `Cargo.toml` which is the root of this workspace. 
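The fallback described here is small enough to state in miniature: without a `root_manifest`, the current manifest's directory is itself the root. A standalone sketch of that rule, not the method below:

```rust
use std::path::{Path, PathBuf};

// Miniature of the `root()` fallback: prefer the workspace's root manifest,
// otherwise fall back to the current manifest, then take its directory.
fn workspace_root(root_manifest: Option<&PathBuf>, current: &Path) -> PathBuf {
    root_manifest
        .map(|p| p.as_path())
        .unwrap_or(current)
        .parent()
        .expect("manifest paths always have a parent directory")
        .to_path_buf()
}

fn main() {
    let current = Path::new("/ws/member/Cargo.toml");
    assert_eq!(workspace_root(None, current), Path::new("/ws/member"));
    let root = PathBuf::from("/ws/Cargo.toml");
    assert_eq!(workspace_root(Some(&root), current), Path::new("/ws"));
}
```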
+ pub fn root(&self) -> &Path { + match self.root_manifest { + Some(ref p) => p, + None => &self.current_manifest, + }.parent() + .unwrap() + } + + pub fn target_dir(&self) -> Filesystem { + self.target_dir + .clone() + .unwrap_or_else(|| Filesystem::new(self.root().join("target"))) + } + + /// Returns the root [replace] section of this workspace. + /// + /// This may be from a virtual crate or an actual crate. + pub fn root_replace(&self) -> &[(PackageIdSpec, Dependency)] { + let path = match self.root_manifest { + Some(ref p) => p, + None => &self.current_manifest, + }; + match *self.packages.get(path) { + MaybePackage::Package(ref p) => p.manifest().replace(), + MaybePackage::Virtual(ref vm) => vm.replace(), + } + } + + /// Returns the root [patch] section of this workspace. + /// + /// This may be from a virtual crate or an actual crate. + pub fn root_patch(&self) -> &HashMap> { + let path = match self.root_manifest { + Some(ref p) => p, + None => &self.current_manifest, + }; + match *self.packages.get(path) { + MaybePackage::Package(ref p) => p.manifest().patch(), + MaybePackage::Virtual(ref vm) => vm.patch(), + } + } + + /// Returns an iterator over all packages in this workspace + pub fn members<'a>(&'a self) -> Members<'a, 'cfg> { + Members { + ws: self, + iter: self.members.iter(), + } + } + + /// Returns an iterator over default packages in this workspace + pub fn default_members<'a>(&'a self) -> Members<'a, 'cfg> { + Members { + ws: self, + iter: self.default_members.iter(), + } + } + + /// Returns true if the package is a member of the workspace. + pub fn is_member(&self, pkg: &Package) -> bool { + self.members().any(|p| p == pkg) + } + + pub fn is_ephemeral(&self) -> bool { + self.is_ephemeral + } + + pub fn require_optional_deps(&self) -> bool { + self.require_optional_deps + } + + pub fn set_require_optional_deps<'a>( + &'a mut self, + require_optional_deps: bool, + ) -> &mut Workspace<'cfg> { + self.require_optional_deps = require_optional_deps; + self + } + + /// Finds the root of a workspace for the crate whose manifest is located + /// at `manifest_path`. + /// + /// This will parse the `Cargo.toml` at `manifest_path` and then interpret + /// the workspace configuration, optionally walking up the filesystem + /// looking for other workspace roots. + /// + /// Returns an error if `manifest_path` isn't actually a valid manifest or + /// if some other transient error happens. 
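A standalone sketch of that upward walk, before the real implementation below. The `has_workspace_table` probe is hypothetical, standing in for loading each candidate manifest and inspecting its workspace configuration; the `skip(2)` mirrors skipping the manifest file itself and its own directory.

```rust
use std::path::{Path, PathBuf};

// Walk upward from the member's directory looking for a `Cargo.toml` that
// declares a `[workspace]`, stopping at CARGO_HOME so registry checkouts
// are never adopted as workspace roots.
fn find_root_dir(
    member_manifest: &Path,
    cargo_home: &Path,
    has_workspace_table: impl Fn(&Path) -> bool,
) -> Option<PathBuf> {
    for dir in member_manifest.ancestors().skip(2) {
        let candidate = dir.join("Cargo.toml");
        if candidate.exists() && has_workspace_table(&candidate) {
            return Some(candidate);
        }
        if dir == cargo_home {
            break; // never walk across CARGO_HOME
        }
    }
    None
}

fn main() {
    // With no `[workspace]` tables anywhere, no root is found.
    let probe = |_: &Path| false;
    assert_eq!(
        find_root_dir(Path::new("/ws/member/Cargo.toml"), Path::new("/home/u/.cargo"), probe),
        None
    );
}
```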
+ fn find_root(&mut self, manifest_path: &Path) -> CargoResult> { + fn read_root_pointer(member_manifest: &Path, root_link: &str) -> CargoResult { + let path = member_manifest + .parent() + .unwrap() + .join(root_link) + .join("Cargo.toml"); + debug!("find_root - pointer {}", path.display()); + Ok(paths::normalize_path(&path)) + }; + + { + let current = self.packages.load(manifest_path)?; + match *current.workspace_config() { + WorkspaceConfig::Root(_) => { + debug!("find_root - is root {}", manifest_path.display()); + return Ok(Some(manifest_path.to_path_buf())); + } + WorkspaceConfig::Member { + root: Some(ref path_to_root), + } => return Ok(Some(read_root_pointer(manifest_path, path_to_root)?)), + WorkspaceConfig::Member { root: None } => {} + } + } + + for path in paths::ancestors(manifest_path).skip(2) { + let ances_manifest_path = path.join("Cargo.toml"); + debug!("find_root - trying {}", ances_manifest_path.display()); + if ances_manifest_path.exists() { + match *self.packages.load(&ances_manifest_path)?.workspace_config() { + WorkspaceConfig::Root(ref ances_root_config) => { + debug!("find_root - found a root checking exclusion"); + if !ances_root_config.is_excluded(manifest_path) { + debug!("find_root - found!"); + return Ok(Some(ances_manifest_path)); + } + } + WorkspaceConfig::Member { + root: Some(ref path_to_root), + } => { + debug!("find_root - found pointer"); + return Ok(Some(read_root_pointer(&ances_manifest_path, path_to_root)?)); + } + WorkspaceConfig::Member { .. } => {} + } + } + + // Don't walk across `CARGO_HOME` when we're looking for the + // workspace root. Sometimes a project will be organized with + // `CARGO_HOME` pointing inside of the workspace root or in the + // current project, but we don't want to mistakenly try to put + // crates.io crates into the workspace by accident. + if self.config.home() == path { + break; + } + } + + Ok(None) + } + + /// After the root of a workspace has been located, probes for all members + /// of a workspace. + /// + /// If the `workspace.members` configuration is present, then this just + /// verifies that those are all valid packages to point to. Otherwise, this + /// will transitively follow all `path` dependencies looking for members of + /// the workspace. + fn find_members(&mut self) -> CargoResult<()> { + let root_manifest_path = match self.root_manifest { + Some(ref path) => path.clone(), + None => { + debug!("find_members - only me as a member"); + self.members.push(self.current_manifest.clone()); + self.default_members.push(self.current_manifest.clone()); + return Ok(()); + } + }; + + let members_paths; + let default_members_paths; + { + let root_package = self.packages.load(&root_manifest_path)?; + match *root_package.workspace_config() { + WorkspaceConfig::Root(ref root_config) => { + members_paths = + root_config.members_paths(root_config.members.as_ref().unwrap_or(&vec![]))?; + default_members_paths = if let Some(ref default) = root_config.default_members { + Some(root_config.members_paths(default)?) 
+ } else { + None + } + } + _ => bail!( + "root of a workspace inferred but wasn't a root: {}", + root_manifest_path.display() + ), + } + } + + for path in members_paths { + self.find_path_deps(&path.join("Cargo.toml"), &root_manifest_path, false)?; + } + + if let Some(default) = default_members_paths { + for path in default { + let manifest_path = paths::normalize_path(&path.join("Cargo.toml")); + if !self.members.contains(&manifest_path) { + bail!( + "package `{}` is listed in workspace’s default-members \ + but is not a member.", + path.display() + ) + } + self.default_members.push(manifest_path) + } + } else if self.is_virtual() { + self.default_members = self.members.clone() + } else { + self.default_members.push(self.current_manifest.clone()) + } + + self.find_path_deps(&root_manifest_path, &root_manifest_path, false) + } + + fn find_path_deps( + &mut self, + manifest_path: &Path, + root_manifest: &Path, + is_path_dep: bool, + ) -> CargoResult<()> { + let manifest_path = paths::normalize_path(manifest_path); + if self.members.contains(&manifest_path) { + return Ok(()); + } + if is_path_dep && !manifest_path.parent().unwrap().starts_with(self.root()) + && self.find_root(&manifest_path)? != self.root_manifest + { + // If `manifest_path` is a path dependency outside of the workspace, + // don't add it, or any of its dependencies, as a members. + return Ok(()); + } + + if let WorkspaceConfig::Root(ref root_config) = + *self.packages.load(root_manifest)?.workspace_config() + { + if root_config.is_excluded(&manifest_path) { + return Ok(()); + } + } + + debug!("find_members - {}", manifest_path.display()); + self.members.push(manifest_path.clone()); + + let candidates = { + let pkg = match *self.packages.load(&manifest_path)? { + MaybePackage::Package(ref p) => p, + MaybePackage::Virtual(_) => return Ok(()), + }; + pkg.dependencies() + .iter() + .map(|d| d.source_id()) + .filter(|d| d.is_path()) + .filter_map(|d| d.url().to_file_path().ok()) + .map(|p| p.join("Cargo.toml")) + .collect::>() + }; + for candidate in candidates { + self.find_path_deps(&candidate, root_manifest, true)?; + } + Ok(()) + } + + /// Validates a workspace, ensuring that a number of invariants are upheld: + /// + /// 1. A workspace only has one root. + /// 2. All workspace members agree on this one root as the root. + /// 3. The current crate is a member of this workspace. + fn validate(&mut self) -> CargoResult<()> { + if self.root_manifest.is_none() { + return Ok(()); + } + + let mut roots = Vec::new(); + { + let mut names = BTreeMap::new(); + for member in self.members.iter() { + let package = self.packages.get(member); + match *package.workspace_config() { + WorkspaceConfig::Root(_) => { + roots.push(member.parent().unwrap().to_path_buf()); + } + WorkspaceConfig::Member { .. 
} => {} + } + let name = match *package { + MaybePackage::Package(ref p) => p.name(), + MaybePackage::Virtual(_) => continue, + }; + if let Some(prev) = names.insert(name, member) { + bail!( + "two packages named `{}` in this workspace:\n\ + - {}\n\ + - {}", + name, + prev.display(), + member.display() + ); + } + } + } + + match roots.len() { + 0 => bail!( + "`package.workspace` configuration points to a crate \ + which is not configured with [workspace]: \n\ + configuration at: {}\n\ + points to: {}", + self.current_manifest.display(), + self.root_manifest.as_ref().unwrap().display() + ), + 1 => {} + _ => { + bail!( + "multiple workspace roots found in the same workspace:\n{}", + roots + .iter() + .map(|r| format!(" {}", r.display())) + .collect::>() + .join("\n") + ); + } + } + + for member in self.members.clone() { + let root = self.find_root(&member)?; + if root == self.root_manifest { + continue; + } + + match root { + Some(root) => { + bail!( + "package `{}` is a member of the wrong workspace\n\ + expected: {}\n\ + actual: {}", + member.display(), + self.root_manifest.as_ref().unwrap().display(), + root.display() + ); + } + None => { + bail!( + "workspace member `{}` is not hierarchically below \ + the workspace root `{}`", + member.display(), + self.root_manifest.as_ref().unwrap().display() + ); + } + } + } + + if !self.members.contains(&self.current_manifest) { + let root = self.root_manifest.as_ref().unwrap(); + let root_dir = root.parent().unwrap(); + let current_dir = self.current_manifest.parent().unwrap(); + let root_pkg = self.packages.get(root); + + // FIXME: Make this more generic by using a relative path resolver between member and + // root. + let members_msg = match current_dir.strip_prefix(root_dir) { + Ok(rel) => format!( + "this may be fixable by adding `{}` to the \ + `workspace.members` array of the manifest \ + located at: {}", + rel.display(), + root.display() + ), + Err(_) => format!( + "this may be fixable by adding a member to \ + the `workspace.members` array of the \ + manifest located at: {}", + root.display() + ), + }; + let extra = match *root_pkg { + MaybePackage::Virtual(_) => members_msg, + MaybePackage::Package(ref p) => { + let has_members_list = match *p.manifest().workspace_config() { + WorkspaceConfig::Root(ref root_config) => root_config.has_members_list(), + WorkspaceConfig::Member { .. } => unreachable!(), + }; + if !has_members_list { + format!( + "this may be fixable by ensuring that this \ + crate is depended on by the workspace \ + root: {}", + root.display() + ) + } else { + members_msg + } + } + }; + bail!( + "current package believes it's in a workspace when it's not:\n\ + current: {}\n\ + workspace: {}\n\n{}", + self.current_manifest.display(), + root.display(), + extra + ); + } + + if let Some(ref root_manifest) = self.root_manifest { + for pkg in self.members() + .filter(|p| p.manifest_path() != root_manifest) + { + if pkg.manifest().original().has_profiles() { + let message = &format!( + "profiles for the non root package will be ignored, \ + specify profiles at the workspace root:\n\ + package: {}\n\ + workspace: {}", + pkg.manifest_path().display(), + root_manifest.display() + ); + + //TODO: remove `Eq` bound from `Profiles` when the warning is removed. 
+ self.config.shell().warn(&message)?; + } + } + } + + Ok(()) + } + + pub fn load(&self, manifest_path: &Path) -> CargoResult { + match self.packages.maybe_get(manifest_path) { + Some(&MaybePackage::Package(ref p)) => return Ok(p.clone()), + Some(&MaybePackage::Virtual(_)) => bail!("cannot load workspace root"), + None => {} + } + + let mut loaded = self.loaded_packages.borrow_mut(); + if let Some(p) = loaded.get(manifest_path).cloned() { + return Ok(p); + } + let source_id = SourceId::for_path(manifest_path.parent().unwrap())?; + let (package, _nested_paths) = ops::read_package(manifest_path, &source_id, self.config)?; + loaded.insert(manifest_path.to_path_buf(), package.clone()); + Ok(package) + } + + /// Preload the provided registry with already loaded packages. + /// + /// A workspace may load packages during construction/parsing/early phases + /// for various operations, and this preload step avoids doubly-loading and + /// parsing crates on the filesystem by inserting them all into the registry + /// with their in-memory formats. + pub fn preload(&self, registry: &mut PackageRegistry<'cfg>) { + // These can get weird as this generally represents a workspace during + // `cargo install`. Things like git repositories will actually have a + // `PathSource` with multiple entries in it, so the logic below is + // mostly just an optimization for normal `cargo build` in workspaces + // during development. + if self.is_ephemeral { + return; + } + + for pkg in self.packages.packages.values() { + let pkg = match *pkg { + MaybePackage::Package(ref p) => p.clone(), + MaybePackage::Virtual(_) => continue, + }; + let mut src = PathSource::new( + pkg.manifest_path(), + pkg.package_id().source_id(), + self.config, + ); + src.preload_with(pkg); + registry.add_preloaded(Box::new(src)); + } + } +} + +impl<'cfg> Packages<'cfg> { + fn get(&self, manifest_path: &Path) -> &MaybePackage { + self.maybe_get(manifest_path).unwrap() + } + + fn maybe_get(&self, manifest_path: &Path) -> Option<&MaybePackage> { + self.packages.get(manifest_path.parent().unwrap()) + } + + fn load(&mut self, manifest_path: &Path) -> CargoResult<&MaybePackage> { + let key = manifest_path.parent().unwrap(); + match self.packages.entry(key.to_path_buf()) { + Entry::Occupied(e) => Ok(e.into_mut()), + Entry::Vacant(v) => { + let source_id = SourceId::for_path(key)?; + let (manifest, _nested_paths) = + read_manifest(manifest_path, &source_id, self.config)?; + Ok(v.insert(match manifest { + EitherManifest::Real(manifest) => { + MaybePackage::Package(Package::new(manifest, manifest_path)) + } + EitherManifest::Virtual(vm) => MaybePackage::Virtual(vm), + })) + } + } + } +} + +impl<'a, 'cfg> Members<'a, 'cfg> { + pub fn is_empty(self) -> bool { + self.count() == 0 + } +} + +impl<'a, 'cfg> Iterator for Members<'a, 'cfg> { + type Item = &'a Package; + + fn next(&mut self) -> Option<&'a Package> { + loop { + let next = self.iter.next().map(|path| self.ws.packages.get(path)); + match next { + Some(&MaybePackage::Package(ref p)) => return Some(p), + Some(&MaybePackage::Virtual(_)) => {} + None => return None, + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } +} + +impl MaybePackage { + fn workspace_config(&self) -> &WorkspaceConfig { + match *self { + MaybePackage::Package(ref p) => p.manifest().workspace_config(), + MaybePackage::Virtual(ref vm) => vm.workspace_config(), + } + } +} + +impl WorkspaceRootConfig { + /// Create a new Intermediate Workspace Root configuration. 
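The exclusion rule implemented by `is_excluded` below can be summarized in miniature: an explicit `members` match always overrides `exclude`, and both lists are matched as path prefixes under the workspace root. A simplified sketch (the real method also handles the `members` list being absent):

```rust
use std::path::{Path, PathBuf};

// Prefix-based member/exclude matching, with `members` taking precedence.
fn is_excluded(root: &Path, members: &[&str], exclude: &[&str], manifest: &Path) -> bool {
    let excluded = exclude.iter().any(|e| manifest.starts_with(root.join(e)));
    let explicit = members.iter().any(|m| manifest.starts_with(root.join(m)));
    !explicit && excluded
}

fn main() {
    let root = Path::new("/ws");
    let manifest = PathBuf::from("/ws/tools/helper/Cargo.toml");
    // Excluded by prefix...
    assert!(is_excluded(root, &[], &["tools"], &manifest));
    // ...unless it is also an explicit member.
    assert!(!is_excluded(root, &["tools/helper"], &["tools"], &manifest));
}
```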
+ pub fn new( + root_dir: &Path, + members: &Option>, + default_members: &Option>, + exclude: &Option>, + ) -> WorkspaceRootConfig { + WorkspaceRootConfig { + root_dir: root_dir.to_path_buf(), + members: members.clone(), + default_members: default_members.clone(), + exclude: exclude.clone().unwrap_or_default(), + } + } + + /// Checks the path against the `excluded` list. + /// + /// This method does NOT consider the `members` list. + fn is_excluded(&self, manifest_path: &Path) -> bool { + let excluded = self.exclude + .iter() + .any(|ex| manifest_path.starts_with(self.root_dir.join(ex))); + + let explicit_member = match self.members { + Some(ref members) => members + .iter() + .any(|mem| manifest_path.starts_with(self.root_dir.join(mem))), + None => false, + }; + + !explicit_member && excluded + } + + fn has_members_list(&self) -> bool { + self.members.is_some() + } + + fn members_paths(&self, globs: &[String]) -> CargoResult> { + let mut expanded_list = Vec::new(); + + for glob in globs { + let pathbuf = self.root_dir.join(glob); + let expanded_paths = Self::expand_member_path(&pathbuf)?; + + // If glob does not find any valid paths, then put the original + // path in the expanded list to maintain backwards compatibility. + if expanded_paths.is_empty() { + expanded_list.push(pathbuf); + } else { + expanded_list.extend(expanded_paths); + } + } + + Ok(expanded_list) + } + + fn expand_member_path(path: &Path) -> CargoResult> { + let path = match path.to_str() { + Some(p) => p, + None => return Ok(Vec::new()), + }; + let res = glob(path).chain_err(|| format_err!("could not parse pattern `{}`", &path))?; + let res = res.map(|p| { + p.chain_err(|| format_err!("unable to match path to pattern `{}`", &path)) + }).collect::, _>>()?; + Ok(res) + } +} diff --git a/src/cargo/lib.rs b/src/cargo/lib.rs new file mode 100644 index 000000000..07b2cd408 --- /dev/null +++ b/src/cargo/lib.rs @@ -0,0 +1,240 @@ +#![cfg_attr(test, deny(warnings))] +// Currently, Cargo does not use clippy for its source code. 
+// But if someone runs it they should know that +// @alexcrichton disagree with clippy on some style things +#![cfg_attr(feature = "cargo-clippy", allow(explicit_iter_loop))] + +extern crate atty; +extern crate clap; +#[cfg(target_os = "macos")] +extern crate core_foundation; +extern crate crates_io as registry; +extern crate crossbeam; +extern crate curl; +#[macro_use] +extern crate failure; +extern crate filetime; +extern crate flate2; +extern crate fs2; +extern crate git2; +extern crate glob; +extern crate hex; +extern crate home; +extern crate ignore; +extern crate jobserver; +#[macro_use] +extern crate lazy_static; +extern crate lazycell; +extern crate libc; +extern crate libgit2_sys; +#[macro_use] +extern crate log; +extern crate num_cpus; +extern crate same_file; +extern crate semver; +extern crate serde; +#[macro_use] +extern crate serde_derive; +extern crate serde_ignored; +#[macro_use] +extern crate serde_json; +extern crate shell_escape; +extern crate tar; +extern crate tempfile; +extern crate termcolor; +extern crate toml; +extern crate url; + +use std::fmt; + +use serde::ser; +use failure::Error; + +use core::Shell; +use core::shell::Verbosity::Verbose; + +pub use util::{CargoError, CargoResult, CliError, CliResult, Config}; +pub use util::errors::Internal; + +pub const CARGO_ENV: &str = "CARGO"; + +pub mod core; +pub mod ops; +pub mod sources; +pub mod util; + +pub struct CommitInfo { + pub short_commit_hash: String, + pub commit_hash: String, + pub commit_date: String, +} + +pub struct CfgInfo { + // Information about the git repository we may have been built from. + pub commit_info: Option, + // The release channel we were built for. + pub release_channel: String, +} + +pub struct VersionInfo { + pub major: u8, + pub minor: u8, + pub patch: u8, + pub pre_release: Option, + // Information that's only available when we were built with + // configure/make, rather than cargo itself. + pub cfg_info: Option, +} + +impl fmt::Display for VersionInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "cargo {}.{}.{}", self.major, self.minor, self.patch)?; + if let Some(channel) = self.cfg_info.as_ref().map(|ci| &ci.release_channel) { + if channel != "stable" { + write!(f, "-{}", channel)?; + let empty = String::from(""); + write!(f, "{}", self.pre_release.as_ref().unwrap_or(&empty))?; + } + }; + + if let Some(ref cfg) = self.cfg_info { + if let Some(ref ci) = cfg.commit_info { + write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?; + } + }; + Ok(()) + } +} + +pub fn print_json(obj: &T) { + let encoded = serde_json::to_string(&obj).unwrap(); + println!("{}", encoded); +} + +pub fn exit_with_error(err: CliError, shell: &mut Shell) -> ! { + debug!("exit_with_error; err={:?}", err); + if let Some(ref err) = err.error { + if let Some(clap_err) = err.downcast_ref::() { + clap_err.exit() + } + } + + let CliError { + error, + exit_code, + unknown, + } = err; + // exit_code == 0 is non-fatal error, e.g. docopt version info + let fatal = exit_code != 0; + + let hide = unknown && shell.verbosity() != Verbose; + + if let Some(error) = error { + if hide { + drop(shell.error("An unknown error occurred")) + } else if fatal { + drop(shell.error(&error)) + } else { + println!("{}", error); + } + + if !handle_cause(&error, shell) || hide { + drop(writeln!( + shell.err(), + "\nTo learn more, run the command again \ + with --verbose." 
+ )); + } + } + + std::process::exit(exit_code) +} + +pub fn handle_error(err: CargoError, shell: &mut Shell) { + debug!("handle_error; err={:?}", &err); + + let _ignored_result = shell.error(&err); + handle_cause(&err, shell); +} + +fn handle_cause(cargo_err: &Error, shell: &mut Shell) -> bool { + fn print(error: String, shell: &mut Shell) { + drop(writeln!(shell.err(), "\nCaused by:")); + drop(writeln!(shell.err(), " {}", error)); + } + + let verbose = shell.verbosity(); + + if verbose == Verbose { + // The first error has already been printed to the shell + // Print all remaining errors + for err in cargo_err.causes().skip(1) { + print(err.to_string(), shell); + } + } else { + // The first error has already been printed to the shell + // Print remaining errors until one marked as Internal appears + for err in cargo_err.causes().skip(1) { + if err.downcast_ref::().is_some() { + return false; + } + + print(err.to_string(), shell); + } + } + + true +} + +pub fn version() -> VersionInfo { + macro_rules! option_env_str { + ($name:expr) => { option_env!($name).map(|s| s.to_string()) } + } + + // So this is pretty horrible... + // There are two versions at play here: + // - version of cargo-the-binary, which you see when you type `cargo --version` + // - version of cargo-the-library, which you download from crates.io for use + // in your projects. + // + // We want to make the `binary` version the same as the corresponding Rust/rustc release. + // At the same time, we want to keep the library version at `0.x`, because Cargo as + // a library is (and probably will always be) unstable. + // + // Historically, Cargo used the same version number for both the binary and the library. + // Specifically, rustc 1.x.z was paired with cargo 0.x+1.w. + // We continue to use this scheme for the library, but transform it to 1.x.w for the purposes + // of `cargo --version`. + let major = 1; + let minor = env!("CARGO_PKG_VERSION_MINOR").parse::().unwrap() - 1; + let patch = env!("CARGO_PKG_VERSION_PATCH").parse::().unwrap(); + + match option_env!("CFG_RELEASE_CHANNEL") { + // We have environment variables set up from configure/make. + Some(_) => { + let commit_info = option_env!("CFG_COMMIT_HASH").map(|s| CommitInfo { + commit_hash: s.to_string(), + short_commit_hash: option_env_str!("CFG_SHORT_COMMIT_HASH").unwrap(), + commit_date: option_env_str!("CFG_COMMIT_DATE").unwrap(), + }); + VersionInfo { + major, + minor, + patch, + pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"), + cfg_info: Some(CfgInfo { + release_channel: option_env_str!("CFG_RELEASE_CHANNEL").unwrap(), + commit_info, + }), + } + } + // We are being compiled by Cargo itself. + None => VersionInfo { + major, + minor, + patch, + pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"), + cfg_info: None, + }, + } +} diff --git a/src/cargo/ops/cargo_clean.rs b/src/cargo/ops/cargo_clean.rs new file mode 100644 index 000000000..6e830970c --- /dev/null +++ b/src/cargo/ops/cargo_clean.rs @@ -0,0 +1,141 @@ +use std::fs; +use std::path::Path; + +use core::compiler::{BuildConfig, BuildContext, CompileMode, Context, Kind, Unit}; +use core::profiles::ProfileFor; +use core::Workspace; +use ops; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; +use util::Config; + +pub struct CleanOptions<'a> { + pub config: &'a Config, + /// A list of packages to clean. If empty, everything is cleaned. 
+ pub spec: Vec, + /// The target arch triple to clean, or None for the host arch + pub target: Option, + /// Whether to clean the release directory + pub release: bool, + /// Whether to just clean the doc directory + pub doc: bool, +} + +/// Cleans the project from build artifacts. +pub fn clean(ws: &Workspace, opts: &CleanOptions) -> CargoResult<()> { + let target_dir = ws.target_dir(); + let config = ws.config(); + + // If the doc option is set, we just want to delete the doc directory. + if opts.doc { + let target_dir = target_dir.join("doc"); + let target_dir = target_dir.into_path_unlocked(); + return rm_rf(&target_dir, config); + } + + // If we have a spec, then we need to delete some packages, otherwise, just + // remove the whole target directory and be done with it! + // + // Note that we don't bother grabbing a lock here as we're just going to + // blow it all away anyway. + if opts.spec.is_empty() { + let target_dir = target_dir.into_path_unlocked(); + return rm_rf(&target_dir, config); + } + + let (packages, resolve) = ops::resolve_ws(ws)?; + + let profiles = ws.profiles(); + let mut units = Vec::new(); + + for spec in opts.spec.iter() { + // Translate the spec to a Package + let pkgid = resolve.query(spec)?; + let pkg = packages.get(pkgid)?; + + // Generate all relevant `Unit` targets for this package + for target in pkg.targets() { + for kind in [Kind::Host, Kind::Target].iter() { + for mode in CompileMode::all_modes() { + for profile_for in ProfileFor::all_values() { + let profile = if mode.is_run_custom_build() { + profiles.get_profile_run_custom_build(&profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + *profile_for, + CompileMode::Build, + opts.release, + )) + } else { + profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + *profile_for, + *mode, + opts.release, + ) + }; + units.push(Unit { + pkg, + target, + profile, + kind: *kind, + mode: *mode, + }); + } + } + } + } + } + + let mut build_config = BuildConfig::new(config, Some(1), &opts.target, CompileMode::Build)?; + build_config.release = opts.release; + let bcx = BuildContext::new( + ws, + &resolve, + &packages, + opts.config, + &build_config, + profiles, + None, + )?; + let mut cx = Context::new(config, &bcx)?; + cx.prepare_units(None, &units)?; + + for unit in units.iter() { + rm_rf(&cx.files().fingerprint_dir(unit), config)?; + if unit.target.is_custom_build() { + if unit.mode.is_run_custom_build() { + rm_rf(&cx.files().build_script_out_dir(unit), config)?; + } else { + rm_rf(&cx.files().build_script_dir(unit), config)?; + } + continue; + } + + for output in cx.outputs(unit)?.iter() { + rm_rf(&output.path, config)?; + if let Some(ref dst) = output.hardlink { + rm_rf(dst, config)?; + } + } + } + + Ok(()) +} + +fn rm_rf(path: &Path, config: &Config) -> CargoResult<()> { + let m = fs::metadata(path); + if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { + config + .shell() + .verbose(|shell| shell.status("Removing", path.display()))?; + paths::remove_dir_all(path).chain_err(|| format_err!("could not remove build directory"))?; + } else if m.is_ok() { + config + .shell() + .verbose(|shell| shell.status("Removing", path.display()))?; + paths::remove_file(path).chain_err(|| format_err!("failed to remove build artifact"))?; + } + Ok(()) +} diff --git a/src/cargo/ops/cargo_compile.rs b/src/cargo/ops/cargo_compile.rs new file mode 100644 index 000000000..552c390e4 --- /dev/null +++ b/src/cargo/ops/cargo_compile.rs @@ -0,0 +1,751 @@ +//! +//! Cargo compile currently does the following steps: +//! 
+//! All configurations are already injected as environment variables via the
+//! main cargo command
+//!
+//! 1. Read the manifest
+//! 2. Shell out to `cargo-resolve` with a list of dependencies and sources as
+//!    stdin
+//!
+//!    a. Shell out to `--do update` and `--do list` for each source
+//!    b. Resolve dependencies and return a list of name/version/source
+//!
+//! 3. Shell out to `--do download` for each source
+//! 4. Shell out to `--do get` for each source, and build up the list of paths
+//!    to pass to rustc -L
+//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
+//!    the results of the `get`
+//!
+//!    a. Topologically sort the dependencies
+//!    b. Compile each dependency in order, passing in the -L's pointing at each
+//!       previously compiled dependency
+//!
+
+use std::collections::HashSet;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use core::compiler::{BuildConfig, BuildContext, Compilation, Context, DefaultExecutor, Executor};
+use core::compiler::{CompileMode, Kind, Unit};
+use core::profiles::{ProfileFor, Profiles};
+use core::resolver::{Method, Resolve};
+use core::{Package, Source, Target};
+use core::{PackageId, PackageIdSpec, TargetKind, Workspace};
+use ops;
+use util::config::Config;
+use util::{lev_distance, profile, CargoResult};
+
+/// Contains information about how a package should be compiled.
+#[derive(Debug)]
+pub struct CompileOptions<'a> {
+    pub config: &'a Config,
+    /// Configuration information for a rustc build
+    pub build_config: BuildConfig,
+    /// Extra features to build for the root package
+    pub features: Vec<String>,
+    /// Flag whether all available features should be built for the root package
+    pub all_features: bool,
+    /// Flag if the default feature should be built for the root package
+    pub no_default_features: bool,
+    /// A set of packages to build.
+    pub spec: Packages,
+    /// Filter to apply to the root package to select which targets will be
+    /// built.
+    pub filter: CompileFilter,
+    /// Extra arguments to be passed to rustdoc (for main crate and dependencies)
+    pub target_rustdoc_args: Option<Vec<String>>,
+    /// The specified target will be compiled with all the available arguments,
+    /// note that this only accounts for the *final* invocation of rustc
+    pub target_rustc_args: Option<Vec<String>>,
+    /// The directory to copy final artifacts to. Note that even if `out_dir` is
+    /// set, a copy of artifacts still could be found at `target/(debug|release)`
+    /// as usual.
+    // Note that, although the cmd-line flag name is `out-dir`, in code we use
+    // `export_dir`, to avoid confusion with out dir at `target/debug/deps`.
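The struct closes with the `export_dir` field just below. With the options assembled, a hypothetical usage sketch; the `build_release` helper and the `"fancy"` feature name are invented for illustration, while `CompileOptions::new`, `Packages::Default`, and `ops::compile` appear in this file.

```rust
use cargo::core::compiler::CompileMode;
use cargo::core::Workspace;
use cargo::ops::{self, CompileOptions, Packages};
use cargo::CargoResult;

// Drive a release build of the workspace's default members through this API.
fn build_release(ws: &Workspace) -> CargoResult<()> {
    let config = ws.config();
    let mut opts = CompileOptions::new(config, CompileMode::Build)?;
    opts.build_config.release = true;          // like `--release`
    opts.features = vec!["fancy".to_string()]; // like `--features fancy`
    opts.spec = Packages::Default;             // default workspace members
    ops::compile(ws, &opts)?;
    Ok(())
}
```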
+ pub export_dir: Option, +} + +impl<'a> CompileOptions<'a> { + pub fn new(config: &'a Config, mode: CompileMode) -> CargoResult> { + Ok(CompileOptions { + config, + build_config: BuildConfig::new(config, None, &None, mode)?, + features: Vec::new(), + all_features: false, + no_default_features: false, + spec: ops::Packages::Packages(Vec::new()), + filter: CompileFilter::Default { + required_features_filterable: false, + }, + target_rustdoc_args: None, + target_rustc_args: None, + export_dir: None, + }) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Packages { + Default, + All, + OptOut(Vec), + Packages(Vec), +} + +impl Packages { + pub fn from_flags(all: bool, exclude: Vec, package: Vec) -> CargoResult { + Ok(match (all, exclude.len(), package.len()) { + (false, 0, 0) => Packages::Default, + (false, 0, _) => Packages::Packages(package), + (false, _, _) => bail!("--exclude can only be used together with --all"), + (true, 0, _) => Packages::All, + (true, _, _) => Packages::OptOut(exclude), + }) + } + + pub fn into_package_id_specs(&self, ws: &Workspace) -> CargoResult> { + let specs = match *self { + Packages::All => ws.members() + .map(Package::package_id) + .map(PackageIdSpec::from_package_id) + .collect(), + Packages::OptOut(ref opt_out) => ws.members() + .map(Package::package_id) + .map(PackageIdSpec::from_package_id) + .filter(|p| opt_out.iter().position(|x| *x == p.name()).is_none()) + .collect(), + Packages::Packages(ref packages) if packages.is_empty() => { + vec![PackageIdSpec::from_package_id(ws.current()?.package_id())] + } + Packages::Packages(ref packages) => packages + .iter() + .map(|p| PackageIdSpec::parse(p)) + .collect::>>()?, + Packages::Default => ws.default_members() + .map(Package::package_id) + .map(PackageIdSpec::from_package_id) + .collect(), + }; + if specs.is_empty() { + if ws.is_virtual() { + bail!( + "manifest path `{}` contains no package: The manifest is virtual, \ + and the workspace has no members.", + ws.root().display() + ) + } + bail!("no packages to compile") + } + Ok(specs) + } +} + +#[derive(Debug)] +pub enum FilterRule { + All, + Just(Vec), +} + +#[derive(Debug)] +pub enum CompileFilter { + Default { + /// Flag whether targets can be safely skipped when required-features are not satisfied. + required_features_filterable: bool, + }, + Only { + all_targets: bool, + lib: bool, + bins: FilterRule, + examples: FilterRule, + tests: FilterRule, + benches: FilterRule, + }, +} + +pub fn compile<'a>( + ws: &Workspace<'a>, + options: &CompileOptions<'a>, +) -> CargoResult> { + compile_with_exec(ws, options, Arc::new(DefaultExecutor)) +} + +/// Like `compile` but allows specifing a custom `Executor` that will be able to intercept build +/// calls and add custom logic. `compile` uses `DefaultExecutor` which just passes calls through. +pub fn compile_with_exec<'a>( + ws: &Workspace<'a>, + options: &CompileOptions<'a>, + exec: Arc, +) -> CargoResult> { + for member in ws.members() { + for warning in member.manifest().warnings().iter() { + if warning.is_critical { + let err = format_err!("{}", warning.message); + let cx = format_err!( + "failed to parse manifest at `{}`", + member.manifest_path().display() + ); + return Err(err.context(cx).into()); + } else { + options.config.shell().warn(&warning.message)? 
+ } + } + } + compile_ws(ws, None, options, exec) +} + +pub fn compile_ws<'a>( + ws: &Workspace<'a>, + source: Option>, + options: &CompileOptions<'a>, + exec: Arc, +) -> CargoResult> { + let CompileOptions { + config, + ref build_config, + ref spec, + ref features, + all_features, + no_default_features, + ref filter, + ref target_rustdoc_args, + ref target_rustc_args, + ref export_dir, + } = *options; + + let default_arch_kind = if build_config.requested_target.is_some() { + Kind::Target + } else { + Kind::Host + }; + + let specs = spec.into_package_id_specs(ws)?; + let features = Method::split_features(features); + let method = Method::Required { + dev_deps: ws.require_optional_deps() || filter.need_dev_deps(build_config.mode), + features: &features, + all_features, + uses_default_features: !no_default_features, + }; + let resolve = ops::resolve_ws_with_method(ws, source, method, &specs)?; + let (packages, resolve_with_overrides) = resolve; + + let to_builds = specs + .iter() + .map(|p| { + let pkgid = p.query(resolve_with_overrides.iter())?; + let p = packages.get(pkgid)?; + p.manifest().print_teapot(ws.config()); + Ok(p) + }) + .collect::>>()?; + + let (extra_args, extra_args_name) = match (target_rustc_args, target_rustdoc_args) { + (&Some(ref args), _) => (Some(args.clone()), "rustc"), + (_, &Some(ref args)) => (Some(args.clone()), "rustdoc"), + _ => (None, ""), + }; + + if extra_args.is_some() && to_builds.len() != 1 { + panic!( + "`{}` should not accept multiple `-p` flags", + extra_args_name + ); + } + + let profiles = ws.profiles(); + profiles.validate_packages(&mut config.shell(), &packages)?; + + let mut extra_compiler_args = None; + + let units = generate_targets( + ws, + profiles, + &to_builds, + filter, + default_arch_kind, + &resolve_with_overrides, + build_config, + )?; + + if let Some(args) = extra_args { + if units.len() != 1 { + bail!( + "extra arguments to `{}` can only be passed to one \ + target, consider filtering\nthe package by passing \ + e.g. `--lib` or `--bin NAME` to specify a single target", + extra_args_name + ); + } + extra_compiler_args = Some((units[0], args)); + } + + let mut ret = { + let _p = profile::start("compiling"); + let bcx = BuildContext::new( + ws, + &resolve_with_overrides, + &packages, + config, + &build_config, + profiles, + extra_compiler_args, + )?; + let mut cx = Context::new(config, &bcx)?; + cx.compile(&units, export_dir.clone(), &exec)? 
+ }; + + ret.to_doc_test = to_builds.into_iter().cloned().collect(); + + return Ok(ret); +} + +impl FilterRule { + pub fn new(targets: Vec, all: bool) -> FilterRule { + if all { + FilterRule::All + } else { + FilterRule::Just(targets) + } + } + + fn matches(&self, target: &Target) -> bool { + match *self { + FilterRule::All => true, + FilterRule::Just(ref targets) => targets.iter().any(|x| *x == target.name()), + } + } + + fn is_specific(&self) -> bool { + match *self { + FilterRule::All => true, + FilterRule::Just(ref targets) => !targets.is_empty(), + } + } + + pub fn try_collect(&self) -> Option> { + match *self { + FilterRule::All => None, + FilterRule::Just(ref targets) => Some(targets.clone()), + } + } +} + +impl CompileFilter { + pub fn new( + lib_only: bool, + bins: Vec, + all_bins: bool, + tsts: Vec, + all_tsts: bool, + exms: Vec, + all_exms: bool, + bens: Vec, + all_bens: bool, + all_targets: bool, + ) -> CompileFilter { + let rule_bins = FilterRule::new(bins, all_bins); + let rule_tsts = FilterRule::new(tsts, all_tsts); + let rule_exms = FilterRule::new(exms, all_exms); + let rule_bens = FilterRule::new(bens, all_bens); + + if all_targets { + CompileFilter::Only { + all_targets: true, + lib: true, + bins: FilterRule::All, + examples: FilterRule::All, + benches: FilterRule::All, + tests: FilterRule::All, + } + } else if lib_only || rule_bins.is_specific() || rule_tsts.is_specific() + || rule_exms.is_specific() || rule_bens.is_specific() + { + CompileFilter::Only { + all_targets: false, + lib: lib_only, + bins: rule_bins, + examples: rule_exms, + benches: rule_bens, + tests: rule_tsts, + } + } else { + CompileFilter::Default { + required_features_filterable: true, + } + } + } + + pub fn need_dev_deps(&self, mode: CompileMode) -> bool { + match mode { + CompileMode::Test | CompileMode::Doctest | CompileMode::Bench => true, + CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { .. } => match *self + { + CompileFilter::Default { .. } => false, + CompileFilter::Only { + ref examples, + ref tests, + ref benches, + .. + } => examples.is_specific() || tests.is_specific() || benches.is_specific(), + }, + CompileMode::RunCustomBuild => panic!("Invalid mode"), + } + } + + // this selects targets for "cargo run". for logic to select targets for + // other subcommands, see generate_targets and generate_default_targets + pub fn target_run(&self, target: &Target) -> bool { + match *self { + CompileFilter::Default { .. } => true, + CompileFilter::Only { + lib, + ref bins, + ref examples, + ref tests, + ref benches, + .. + } => { + let rule = match *target.kind() { + TargetKind::Bin => bins, + TargetKind::Test => tests, + TargetKind::Bench => benches, + TargetKind::ExampleBin | TargetKind::ExampleLib(..) => examples, + TargetKind::Lib(..) => return lib, + TargetKind::CustomBuild => return false, + }; + rule.matches(target) + } + } + } + + pub fn is_specific(&self) -> bool { + match *self { + CompileFilter::Default { .. } => false, + CompileFilter::Only { .. } => true, + } + } +} + +/// Generates all the base targets for the packages the user has requested to +/// compile. Dependencies for these targets are computed later in +/// `unit_dependencies`. +fn generate_targets<'a>( + ws: &Workspace, + profiles: &Profiles, + packages: &[&'a Package], + filter: &CompileFilter, + default_arch_kind: Kind, + resolve: &Resolve, + build_config: &BuildConfig, +) -> CargoResult>> { + let mut units = Vec::new(); + + // Helper for creating a Unit struct. 
+ let new_unit = |pkg: &'a Package, target: &'a Target, target_mode: CompileMode| { + let profile_for = if build_config.mode.is_any_test() { + // NOTE: The ProfileFor here is subtle. If you have a profile + // with `panic` set, the `panic` flag is cleared for + // tests/benchmarks and their dependencies. If we left this + // as an "Any" profile, then the lib would get compiled three + // times (once with panic, once without, and once with + // --test). + // + // This would cause a problem for Doc tests, which would fail + // because `rustdoc` would attempt to link with both libraries + // at the same time. Also, it's probably not important (or + // even desirable?) for rustdoc to link with a lib with + // `panic` set. + // + // As a consequence, Examples and Binaries get compiled + // without `panic` set. This probably isn't a bad deal. + // + // Forcing the lib to be compiled three times during `cargo + // test` is probably also not desirable. + ProfileFor::TestDependency + } else { + ProfileFor::Any + }; + let target_mode = match target_mode { + CompileMode::Test => { + if target.is_example() && !filter.is_specific() && !target.tested() { + // Examples are included as regular binaries to verify + // that they compile. + CompileMode::Build + } else { + CompileMode::Test + } + } + CompileMode::Build => match *target.kind() { + TargetKind::Test => CompileMode::Test, + TargetKind::Bench => CompileMode::Bench, + _ => CompileMode::Build, + }, + _ => target_mode, + }; + // Plugins or proc-macro should be built for the host. + let kind = if target.for_host() { + Kind::Host + } else { + default_arch_kind + }; + let profile = profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + profile_for, + target_mode, + build_config.release, + ); + // Once the profile has been selected for benchmarks, we don't need to + // distinguish between benches and tests. Switching the mode allows + // de-duplication of units that are essentially identical. For + // example, `cargo build --all-targets --release` creates the units + // (lib profile:bench, mode:test) and (lib profile:bench, mode:bench) + // and since these are the same, we want them to be de-duped in + // `unit_dependencies`. + let target_mode = match target_mode { + CompileMode::Bench => CompileMode::Test, + _ => target_mode, + }; + Unit { + pkg, + target, + profile, + kind, + mode: target_mode, + } + }; + + for pkg in packages { + let features = resolve_all_features(resolve, pkg.package_id()); + // Create a list of proposed targets. The `bool` value indicates + // whether or not all required features *must* be present. If false, + // and the features are not available, then it will be silently + // skipped. Generally, targets specified by name (`--bin foo`) are + // required, all others can be silently skipped if features are + // missing. + let mut proposals: Vec<(Unit<'a>, bool)> = Vec::new(); + + match *filter { + CompileFilter::Default { + required_features_filterable, + } => { + let default_units = generate_default_targets(pkg.targets(), build_config.mode) + .iter() + .map(|t| { + ( + new_unit(pkg, t, build_config.mode), + !required_features_filterable, + ) + }) + .collect::>(); + proposals.extend(default_units); + if build_config.mode == CompileMode::Test { + // Include the lib as it will be required for doctests. 
+ if let Some(t) = pkg.targets().iter().find(|t| t.is_lib() && t.doctested()) { + proposals.push((new_unit(pkg, t, CompileMode::Build), false)); + } + } + } + CompileFilter::Only { + all_targets, + lib, + ref bins, + ref examples, + ref tests, + ref benches, + } => { + if lib { + if let Some(target) = pkg.targets().iter().find(|t| t.is_lib()) { + proposals.push((new_unit(pkg, target, build_config.mode), false)); + } else if !all_targets { + bail!("no library targets found") + } + } + // If --tests was specified, add all targets that would be + // generated by `cargo test`. + let test_filter = match *tests { + FilterRule::All => Target::tested, + FilterRule::Just(_) => Target::is_test, + }; + let test_mode = match build_config.mode { + CompileMode::Build => CompileMode::Test, + CompileMode::Check { .. } => CompileMode::Check { test: true }, + _ => build_config.mode, + }; + // If --benches was specified, add all targets that would be + // generated by `cargo bench`. + let bench_filter = match *benches { + FilterRule::All => Target::benched, + FilterRule::Just(_) => Target::is_bench, + }; + let bench_mode = match build_config.mode { + CompileMode::Build => CompileMode::Bench, + CompileMode::Check { .. } => CompileMode::Check { test: true }, + _ => build_config.mode, + }; + + proposals.extend( + list_rule_targets(pkg, bins, "bin", Target::is_bin)? + .into_iter() + .map(|(t, required)| (new_unit(pkg, t, build_config.mode), required)) + .chain( + list_rule_targets(pkg, examples, "example", Target::is_example)? + .into_iter() + .map(|(t, required)| { + (new_unit(pkg, t, build_config.mode), required) + }), + ) + .chain( + list_rule_targets(pkg, tests, "test", test_filter)? + .into_iter() + .map(|(t, required)| (new_unit(pkg, t, test_mode), required)), + ) + .chain( + list_rule_targets(pkg, benches, "bench", bench_filter)? + .into_iter() + .map(|(t, required)| (new_unit(pkg, t, bench_mode), required)), + ) + .collect::>(), + ); + } + } + + // Only include targets that are libraries or have all required + // features available. + for (unit, required) in proposals { + let unavailable_features = match unit.target.required_features() { + Some(rf) => rf.iter().filter(|f| !features.contains(*f)).collect(), + None => Vec::new(), + }; + if unit.target.is_lib() || unavailable_features.is_empty() { + units.push(unit); + } else if required { + let required_features = unit.target.required_features().unwrap(); + let quoted_required_features: Vec = required_features + .iter() + .map(|s| format!("`{}`", s)) + .collect(); + bail!( + "target `{}` requires the features: {}\n\ + Consider enabling them by passing e.g. `--features=\"{}\"`", + unit.target.name(), + quoted_required_features.join(", "), + required_features.join(" ") + ); + } + // else, silently skip target. + } + } + Ok(units) +} + +fn resolve_all_features( + resolve_with_overrides: &Resolve, + package_id: &PackageId, +) -> HashSet { + let mut features = resolve_with_overrides.features(package_id).clone(); + + // Include features enabled for use by dependencies so targets can also use them with the + // required-features field when deciding whether to be built or skipped. + for (dep, _) in resolve_with_overrides.deps(package_id) { + for feature in resolve_with_overrides.features(dep) { + features.insert(dep.name().to_string() + "/" + feature); + } + } + + features +} + +/// Given a list of all targets for a package, filters out only the targets +/// that are automatically included when the user doesn't specify any targets. 
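The keep-or-skip decision made in the proposal loop above reduces to a small standalone sketch (names are illustrative, not cargo's API): libs always build, missing `required-features` silently skip a target, and a target requested explicitly by name errors instead.

```rust
use std::collections::HashSet;

// Decide whether a proposed target should be built, skipped, or rejected.
fn keep_target(
    is_lib: bool,
    required: Option<&[&str]>,
    enabled: &HashSet<&str>,
    explicitly_named: bool,
) -> Result<bool, String> {
    let missing: Vec<&str> = required
        .unwrap_or(&[])
        .iter()
        .filter(|f| !enabled.contains(*f))
        .cloned()
        .collect();
    if is_lib || missing.is_empty() {
        Ok(true)
    } else if explicitly_named {
        Err(format!("target requires the features: {}", missing.join(", ")))
    } else {
        Ok(false) // silently skipped
    }
}

fn main() {
    let enabled: HashSet<&str> = ["fancy"].iter().cloned().collect();
    assert_eq!(keep_target(false, Some(&["fancy"]), &enabled, false), Ok(true));
    assert_eq!(keep_target(false, Some(&["extra"]), &enabled, false), Ok(false));
    assert!(keep_target(false, Some(&["extra"]), &enabled, true).is_err());
}
```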
+fn generate_default_targets(targets: &[Target], mode: CompileMode) -> Vec<&Target> {
+    match mode {
+        CompileMode::Bench => targets.iter().filter(|t| t.benched()).collect(),
+        CompileMode::Test => targets
+            .iter()
+            .filter(|t| t.tested() || t.is_example())
+            .collect(),
+        CompileMode::Build | CompileMode::Check { .. } => targets
+            .iter()
+            .filter(|t| t.is_bin() || t.is_lib())
+            .collect(),
+        CompileMode::Doc { .. } => {
+            // `doc` does lib and bins (bin with same name as lib is skipped).
+            targets
+                .iter()
+                .filter(|t| {
+                    t.documented()
+                        && (!t.is_bin()
+                            || !targets.iter().any(|l| l.is_lib() && l.name() == t.name()))
+                })
+                .collect()
+        }
+        CompileMode::Doctest => {
+            // `test --doc`
+            targets
+                .iter()
+                .find(|t| t.is_lib() && t.doctested())
+                .into_iter()
+                .collect()
+        }
+        CompileMode::RunCustomBuild => panic!("Invalid mode"),
+    }
+}
+
+/// Returns a list of targets based on command-line target selection flags.
+/// The return value is a list of `(Target, bool)` pairs. The `bool` value
+/// indicates whether or not all required features *must* be present.
+fn list_rule_targets<'a>(
+    pkg: &'a Package,
+    rule: &FilterRule,
+    target_desc: &'static str,
+    is_expected_kind: fn(&Target) -> bool,
+) -> CargoResult<Vec<(&'a Target, bool)>> {
+    match *rule {
+        FilterRule::All => Ok(pkg.targets()
+            .iter()
+            .filter(|t| is_expected_kind(t))
+            .map(|t| (t, false))
+            .collect()),
+        FilterRule::Just(ref names) => names
+            .iter()
+            .map(|name| find_target(pkg, name, target_desc, is_expected_kind))
+            .collect(),
+    }
+}
+
+/// Finds the target whose name matches `target_name` and whose kind is the
+/// expected one, suggesting a close match if the lookup fails.
+fn find_target<'a>(
+    pkg: &'a Package,
+    target_name: &str,
+    target_desc: &'static str,
+    is_expected_kind: fn(&Target) -> bool,
+) -> CargoResult<(&'a Target, bool)> {
+    match pkg.targets()
+        .iter()
+        .find(|t| t.name() == target_name && is_expected_kind(t))
+    {
+        // When a target is specified by name, required features *must* be
+        // available.
+        Some(t) => Ok((t, true)),
+        None => {
+            let suggestion = pkg.targets()
+                .iter()
+                .filter(|t| is_expected_kind(t))
+                .map(|t| (lev_distance(target_name, t.name()), t))
+                .filter(|&(d, _)| d < 4)
+                .min_by_key(|t| t.0)
+                .map(|t| t.1);
+            match suggestion {
+                Some(s) => bail!(
+                    "no {} target named `{}`\n\nDid you mean `{}`?",
+                    target_desc,
+                    target_name,
+                    s.name()
+                ),
+                None => bail!("no {} target named `{}`", target_desc, target_name),
+            }
+        }
+    }
+}
diff --git a/src/cargo/ops/cargo_doc.rs b/src/cargo/ops/cargo_doc.rs
new file mode 100644
index 000000000..ab3aa62ce
--- /dev/null
+++ b/src/cargo/ops/cargo_doc.rs
@@ -0,0 +1,153 @@
+use std::collections::HashMap;
+use std::fs;
+use std::path::Path;
+use std::process::Command;
+
+use core::Workspace;
+use ops;
+use util::CargoResult;
+
+/// Strongly typed options for the `cargo doc` command.
+#[derive(Debug)]
+pub struct DocOptions<'a> {
+    /// Whether to attempt to open the browser after compiling the docs
+    pub open_result: bool,
+    /// Options to pass through to the compiler
+    pub compile_opts: ops::CompileOptions<'a>,
+}
+
+/// Main method for `cargo doc`.
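+// A minimal sketch of how a caller drives this (assuming a configured
+// `Workspace` and `CompileOptions` are already in hand; variable names
+// are hypothetical):
+//
+//     let options = DocOptions { open_result: true, compile_opts };
+//     ops::doc(&ws, &options)?;  // build the docs, then try to open them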
+pub fn doc(ws: &Workspace, options: &DocOptions) -> CargoResult<()> { + let specs = options.compile_opts.spec.into_package_id_specs(ws)?; + let resolve = ops::resolve_ws_precisely( + ws, + None, + &options.compile_opts.features, + options.compile_opts.all_features, + options.compile_opts.no_default_features, + &specs, + )?; + let (packages, resolve_with_overrides) = resolve; + + let pkgs = specs + .iter() + .map(|p| { + let pkgid = p.query(resolve_with_overrides.iter())?; + packages.get(pkgid) + }) + .collect::>>()?; + + let mut lib_names = HashMap::new(); + let mut bin_names = HashMap::new(); + for package in &pkgs { + for target in package.targets().iter().filter(|t| t.documented()) { + if target.is_lib() { + if let Some(prev) = lib_names.insert(target.crate_name(), package) { + bail!( + "The library `{}` is specified by packages `{}` and \ + `{}` but can only be documented once. Consider renaming \ + or marking one of the targets as `doc = false`.", + target.crate_name(), + prev, + package + ); + } + } else if let Some(prev) = bin_names.insert(target.crate_name(), package) { + bail!( + "The binary `{}` is specified by packages `{}` and \ + `{}` but can be documented only once. Consider renaming \ + or marking one of the targets as `doc = false`.", + target.crate_name(), + prev, + package + ); + } + } + } + + ops::compile(ws, &options.compile_opts)?; + + if options.open_result { + let name = if pkgs.len() > 1 { + bail!( + "Passing multiple packages and `open` is not supported.\n\ + Please re-run this command with `-p ` where `` \ + is one of the following:\n {}", + pkgs.iter() + .map(|p| p.name().as_str()) + .collect::>() + .join("\n ") + ); + } else if pkgs.len() == 1 { + pkgs[0].name().replace("-", "_") + } else { + match lib_names.keys().chain(bin_names.keys()).nth(0) { + Some(s) => s.to_string(), + None => return Ok(()), + } + }; + + // Don't bother locking here as if this is getting deleted there's + // nothing we can do about it and otherwise if it's getting overwritten + // then that's also ok! 
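+        // The `name` computed above is already in rustc's crate-name form:
+        // hyphens become underscores, so a hypothetical package `my-package`
+        // is documented at `target/doc/my_package/index.html`. In sketch
+        // form:
+        //
+        //     assert_eq!("my-package".replace("-", "_"), "my_package");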
+ let mut target_dir = ws.target_dir(); + if let Some(ref triple) = options.compile_opts.build_config.requested_target { + target_dir.push(Path::new(triple).file_stem().unwrap()); + } + let path = target_dir.join("doc").join(&name).join("index.html"); + let path = path.into_path_unlocked(); + if fs::metadata(&path).is_ok() { + let mut shell = options.compile_opts.config.shell(); + shell.status("Opening", path.display())?; + match open_docs(&path) { + Ok(m) => shell.status("Launching", m)?, + Err(e) => { + shell.warn("warning: could not determine a browser to open docs with, tried:")?; + for method in e { + shell.warn(format!("\t{}", method))?; + } + } + } + } + } + + Ok(()) +} + +#[cfg(not(any(target_os = "windows", target_os = "macos")))] +fn open_docs(path: &Path) -> Result<&'static str, Vec<&'static str>> { + use std::env; + let mut methods = Vec::new(); + // trying $BROWSER + if let Ok(name) = env::var("BROWSER") { + match Command::new(name).arg(path).status() { + Ok(_) => return Ok("$BROWSER"), + Err(_) => methods.push("$BROWSER"), + } + } + + for m in ["xdg-open", "gnome-open", "kde-open"].iter() { + match Command::new(m).arg(path).status() { + Ok(_) => return Ok(m), + Err(_) => methods.push(m), + } + } + + Err(methods) +} + +#[cfg(target_os = "windows")] +fn open_docs(path: &Path) -> Result<&'static str, Vec<&'static str>> { + match Command::new("cmd").arg("/C").arg(path).status() { + Ok(_) => Ok("cmd /C"), + Err(_) => Err(vec!["cmd /C"]), + } +} + +#[cfg(target_os = "macos")] +fn open_docs(path: &Path) -> Result<&'static str, Vec<&'static str>> { + match Command::new("open").arg(path).status() { + Ok(_) => Ok("open"), + Err(_) => Err(vec!["open"]), + } +} diff --git a/src/cargo/ops/cargo_fetch.rs b/src/cargo/ops/cargo_fetch.rs new file mode 100644 index 000000000..ce9a788bc --- /dev/null +++ b/src/cargo/ops/cargo_fetch.rs @@ -0,0 +1,63 @@ +use core::compiler::{BuildConfig, CompileMode, Kind, TargetInfo}; +use core::{PackageSet, Resolve, Workspace}; +use ops; +use std::collections::HashSet; +use util::CargoResult; +use util::Config; + +pub struct FetchOptions<'a> { + pub config: &'a Config, + /// The target arch triple to fetch dependencies for + pub target: Option, +} + +/// Executes `cargo fetch`. +pub fn fetch<'a>( + ws: &Workspace<'a>, + options: &FetchOptions<'a>, +) -> CargoResult<(Resolve, PackageSet<'a>)> { + let (packages, resolve) = ops::resolve_ws(ws)?; + + let jobs = Some(1); + let config = ws.config(); + let build_config = BuildConfig::new(config, jobs, &options.target, CompileMode::Build)?; + let rustc = config.rustc(Some(ws))?; + let target_info = + TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Target)?; + { + let mut fetched_packages = HashSet::new(); + let mut deps_to_fetch = ws.members().map(|p| p.package_id()).collect::>(); + + while let Some(id) = deps_to_fetch.pop() { + if !fetched_packages.insert(id) { + continue; + } + + packages.get(id)?; + let deps = resolve.deps(id) + .filter(|&(_id, deps)| { + deps.iter() + .any(|d| { + // If no target was specified then all dependencies can + // be fetched. + let target = match options.target { + Some(ref t) => t, + None => return true, + }; + // If this dependency is only available for certain + // platforms, make sure we're only fetching it for that + // platform. 
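+                        // A concrete illustration (hypothetical manifest
+                        // entry):
+                        //
+                        //     [target.'cfg(windows)'.dependencies]
+                        //     winapi = "0.3"
+                        //
+                        // With `--target x86_64-unknown-linux-gnu`, the
+                        // `cfg(windows)` platform below fails to match, so
+                        // `winapi` is never downloaded.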
+ let platform = match d.platform() { + Some(p) => p, + None => return true, + }; + platform.matches(target, target_info.cfg()) + }) + }) + .map(|(id, _deps)| id); + deps_to_fetch.extend(deps); + } + } + + Ok((resolve, packages)) +} diff --git a/src/cargo/ops/cargo_generate_lockfile.rs b/src/cargo/ops/cargo_generate_lockfile.rs new file mode 100644 index 000000000..c71bb4aa8 --- /dev/null +++ b/src/cargo/ops/cargo_generate_lockfile.rs @@ -0,0 +1,213 @@ +use std::collections::{BTreeMap, HashSet}; + +use termcolor::Color::{self, Cyan, Green, Red}; + +use core::PackageId; +use core::registry::PackageRegistry; +use core::{Resolve, SourceId, Workspace}; +use core::resolver::Method; +use ops; +use util::config::Config; +use util::CargoResult; + +pub struct UpdateOptions<'a> { + pub config: &'a Config, + pub to_update: Vec, + pub precise: Option<&'a str>, + pub aggressive: bool, +} + +pub fn generate_lockfile(ws: &Workspace) -> CargoResult<()> { + let mut registry = PackageRegistry::new(ws.config())?; + let resolve = ops::resolve_with_previous( + &mut registry, + ws, + Method::Everything, + None, + None, + &[], + true, + true, + )?; + ops::write_pkg_lockfile(ws, &resolve)?; + Ok(()) +} + +pub fn update_lockfile(ws: &Workspace, opts: &UpdateOptions) -> CargoResult<()> { + if opts.aggressive && opts.precise.is_some() { + bail!("cannot specify both aggressive and precise simultaneously") + } + + if ws.members().is_empty() { + bail!("you can't generate a lockfile for an empty workspace.") + } + + if opts.config.cli_unstable().offline { + bail!("you can't update in the offline mode"); + } + + let previous_resolve = match ops::load_pkg_lockfile(ws)? { + Some(resolve) => resolve, + None => return generate_lockfile(ws), + }; + let mut registry = PackageRegistry::new(opts.config)?; + let mut to_avoid = HashSet::new(); + + if opts.to_update.is_empty() { + to_avoid.extend(previous_resolve.iter()); + } else { + let mut sources = Vec::new(); + for name in opts.to_update.iter() { + let dep = previous_resolve.query(name)?; + if opts.aggressive { + fill_with_deps(&previous_resolve, dep, &mut to_avoid, &mut HashSet::new()); + } else { + to_avoid.insert(dep); + sources.push(match opts.precise { + Some(precise) => { + // TODO: see comment in `resolve.rs` as well, but this + // seems like a pretty hokey reason to single out + // the registry as well. + let precise = if dep.source_id().is_registry() { + format!("{}={}->{}", dep.name(), dep.version(), precise) + } else { + precise.to_string() + }; + dep.source_id().clone().with_precise(Some(precise)) + } + None => dep.source_id().clone().with_precise(None), + }); + } + } + registry.add_sources(&sources)?; + } + + let resolve = ops::resolve_with_previous( + &mut registry, + ws, + Method::Everything, + Some(&previous_resolve), + Some(&to_avoid), + &[], + true, + true, + )?; + + // Summarize what is changing for the user. 
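+    // For example (hypothetical versions), `cargo update -p log` might print
+    //
+    //     Updating log v0.4.1 -> v0.4.3
+    //
+    // while a git dependency reports the new revision instead, e.g.
+    // `foo v0.1.0 -> #0f2c6d3e`, built from the first eight characters of
+    // the `precise` field as formatted below.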
+ let print_change = |status: &str, msg: String, color: Color| { + opts.config.shell().status_with_color(status, msg, color) + }; + for (removed, added) in compare_dependency_graphs(&previous_resolve, &resolve) { + if removed.len() == 1 && added.len() == 1 { + let msg = if removed[0].source_id().is_git() { + format!( + "{} -> #{}", + removed[0], + &added[0].source_id().precise().unwrap()[..8] + ) + } else { + format!("{} -> v{}", removed[0], added[0].version()) + }; + print_change("Updating", msg, Green)?; + } else { + for package in removed.iter() { + print_change("Removing", format!("{}", package), Red)?; + } + for package in added.iter() { + print_change("Adding", format!("{}", package), Cyan)?; + } + } + } + + ops::write_pkg_lockfile(ws, &resolve)?; + return Ok(()); + + fn fill_with_deps<'a>( + resolve: &'a Resolve, + dep: &'a PackageId, + set: &mut HashSet<&'a PackageId>, + visited: &mut HashSet<&'a PackageId>, + ) { + if !visited.insert(dep) { + return; + } + set.insert(dep); + for dep in resolve.deps_not_replaced(dep) { + fill_with_deps(resolve, dep, set, visited); + } + } + + fn compare_dependency_graphs<'a>( + previous_resolve: &'a Resolve, + resolve: &'a Resolve, + ) -> Vec<(Vec<&'a PackageId>, Vec<&'a PackageId>)> { + fn key(dep: &PackageId) -> (&str, &SourceId) { + (dep.name().as_str(), dep.source_id()) + } + + // Removes all package ids in `b` from `a`. Note that this is somewhat + // more complicated because the equality for source ids does not take + // precise versions into account (e.g. git shas), but we want to take + // that into account here. + fn vec_subtract<'a>(a: &[&'a PackageId], b: &[&'a PackageId]) -> Vec<&'a PackageId> { + a.iter() + .filter(|a| { + // If this package id is not found in `b`, then it's definitely + // in the subtracted set + let i = match b.binary_search(a) { + Ok(i) => i, + Err(..) => return true, + }; + + // If we've found `a` in `b`, then we iterate over all instances + // (we know `b` is sorted) and see if they all have different + // precise versions. If so, then `a` isn't actually in `b` so + // we'll let it through. + // + // Note that we only check this for non-registry sources, + // however, as registries contain enough version information in + // the package id to disambiguate + if a.source_id().is_registry() { + return false; + } + b[i..] + .iter() + .take_while(|b| a == b) + .all(|b| a.source_id().precise() != b.source_id().precise()) + }) + .cloned() + .collect() + } + + // Map (package name, package source) to (removed versions, added versions). 
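+        // The shape built below, on hypothetical data:
+        //
+        //     ("log",  registry) -> (removed: [v0.4.1], added: [v0.4.3])
+        //     ("rand", registry) -> (removed: [],       added: [v0.5.0])
+        //
+        // `vec_subtract` is then applied in both directions so versions
+        // present on both sides cancel out, leaving only real changes.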
+ let mut changes = BTreeMap::new(); + let empty = (Vec::new(), Vec::new()); + for dep in previous_resolve.iter() { + changes + .entry(key(dep)) + .or_insert_with(|| empty.clone()) + .0 + .push(dep); + } + for dep in resolve.iter() { + changes + .entry(key(dep)) + .or_insert_with(|| empty.clone()) + .1 + .push(dep); + } + + for v in changes.values_mut() { + let (ref mut old, ref mut new) = *v; + old.sort(); + new.sort(); + let removed = vec_subtract(old, new); + let added = vec_subtract(new, old); + *old = removed; + *new = added; + } + debug!("{:#?}", changes); + + changes.into_iter().map(|(_, v)| v).collect() + } +} diff --git a/src/cargo/ops/cargo_install.rs b/src/cargo/ops/cargo_install.rs new file mode 100644 index 000000000..d5f09ff9b --- /dev/null +++ b/src/cargo/ops/cargo_install.rs @@ -0,0 +1,819 @@ +use std::collections::btree_map::Entry; +use std::collections::{BTreeMap, BTreeSet}; +use std::{env, fs}; +use std::io::prelude::*; +use std::io::SeekFrom; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use semver::{Version, VersionReq}; +use tempfile::Builder as TempFileBuilder; +use toml; + +use core::{Dependency, Edition, Package, PackageIdSpec, Source, SourceId}; +use core::{PackageId, Workspace}; +use core::compiler::DefaultExecutor; +use ops::{self, CompileFilter}; +use sources::{GitSource, PathSource, SourceConfigMap}; +use util::{internal, Config}; +use util::{FileLock, Filesystem}; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; + +#[derive(Deserialize, Serialize)] +#[serde(untagged)] +enum CrateListing { + V1(CrateListingV1), + Empty(Empty), +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +struct Empty {} + +#[derive(Deserialize, Serialize)] +struct CrateListingV1 { + v1: BTreeMap>, +} + +struct Transaction { + bins: Vec, +} + +impl Transaction { + fn success(mut self) { + self.bins.clear(); + } +} + +impl Drop for Transaction { + fn drop(&mut self) { + for bin in self.bins.iter() { + let _ = paths::remove_file(bin); + } + } +} + +pub fn install( + root: Option<&str>, + krates: Vec<&str>, + source_id: &SourceId, + from_cwd: bool, + vers: Option<&str>, + opts: &ops::CompileOptions, + force: bool, +) -> CargoResult<()> { + let root = resolve_root(root, opts.config)?; + let map = SourceConfigMap::new(opts.config)?; + + let (installed_anything, scheduled_error) = if krates.len() <= 1 { + install_one( + &root, + &map, + krates.into_iter().next(), + source_id, + from_cwd, + vers, + opts, + force, + true, + )?; + (true, false) + } else { + let mut succeeded = vec![]; + let mut failed = vec![]; + let mut first = true; + for krate in krates { + let root = root.clone(); + let map = map.clone(); + match install_one( + &root, + &map, + Some(krate), + source_id, + from_cwd, + vers, + opts, + force, + first, + ) { + Ok(()) => succeeded.push(krate), + Err(e) => { + ::handle_error(e, &mut opts.config.shell()); + failed.push(krate) + } + } + first = false; + } + + let mut summary = vec![]; + if !succeeded.is_empty() { + summary.push(format!("Successfully installed {}!", succeeded.join(", "))); + } + if !failed.is_empty() { + summary.push(format!( + "Failed to install {} (see error(s) above).", + failed.join(", ") + )); + } + if !succeeded.is_empty() || !failed.is_empty() { + opts.config.shell().status("Summary", summary.join(" "))?; + } + + (!succeeded.is_empty(), !failed.is_empty()) + }; + + if installed_anything { + // Print a warning that if this directory isn't in PATH that they won't be + // able to run these commands. 
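+        // A standalone sketch of the check below (std only; `dst` is a
+        // hypothetical install dir):
+        //
+        //     use std::{env, path::Path};
+        //     let dst = Path::new("/home/user/.cargo/bin");
+        //     let on_path = env::var_os("PATH")
+        //         .map(|p| env::split_paths(&p).any(|d| d == dst))
+        //         .unwrap_or(false);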
+ let dst = metadata(opts.config, &root)?.parent().join("bin"); + let path = env::var_os("PATH").unwrap_or_default(); + for path in env::split_paths(&path) { + if path == dst { + return Ok(()); + } + } + + opts.config.shell().warn(&format!( + "be sure to add `{}` to your PATH to be \ + able to run the installed binaries", + dst.display() + ))?; + } + + if scheduled_error { + bail!("some crates failed to install"); + } + + Ok(()) +} + +fn install_one( + root: &Filesystem, + map: &SourceConfigMap, + krate: Option<&str>, + source_id: &SourceId, + from_cwd: bool, + vers: Option<&str>, + opts: &ops::CompileOptions, + force: bool, + is_first_install: bool, +) -> CargoResult<()> { + let config = opts.config; + + let (pkg, source) = if source_id.is_git() { + select_pkg( + GitSource::new(source_id, config)?, + krate, + vers, + config, + is_first_install, + &mut |git| git.read_packages(), + )? + } else if source_id.is_path() { + let path = source_id + .url() + .to_file_path() + .map_err(|()| format_err!("path sources must have a valid path"))?; + let mut src = PathSource::new(&path, source_id, config); + src.update().chain_err(|| { + format_err!( + "`{}` is not a crate root; specify a crate to \ + install from crates.io, or use --path or --git to \ + specify an alternate source", + path.display() + ) + })?; + select_pkg( + PathSource::new(&path, source_id, config), + krate, + vers, + config, + is_first_install, + &mut |path| path.read_packages(), + )? + } else { + select_pkg( + map.load(source_id)?, + krate, + vers, + config, + is_first_install, + &mut |_| { + bail!( + "must specify a crate to install from \ + crates.io, or use --path or --git to \ + specify alternate source" + ) + }, + )? + }; + + let mut td_opt = None; + let mut needs_cleanup = false; + let overidden_target_dir = if source_id.is_path() { + None + } else if let Some(dir) = config.target_dir()? { + Some(dir) + } else if let Ok(td) = TempFileBuilder::new().prefix("cargo-install").tempdir() { + let p = td.path().to_owned(); + td_opt = Some(td); + Some(Filesystem::new(p)) + } else { + needs_cleanup = true; + Some(Filesystem::new(config.cwd().join("target-install"))) + }; + + let ws = Workspace::ephemeral(pkg, config, overidden_target_dir, false)?; + let pkg = ws.current()?; + + if from_cwd { + match pkg.manifest().edition() { + Edition::Edition2015 => config.shell().warn( + "To build the current package use `cargo build`, \ + to install the current package run `cargo install --path .`", + )?, + Edition::Edition2018 => bail!( + "To build the current package use `cargo build`, \ + to install the current package run `cargo install --path .`, \ + otherwise specify a crate to install from \ + crates.io, or use --path or --git to \ + specify alternate source" + ), + } + }; + + config.shell().status("Installing", pkg)?; + + // Preflight checks to check up front whether we'll overwrite something. + // We have to check this again afterwards, but may as well avoid building + // anything if we're gonna throw it away anyway. 
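+    // The duplicates map computed by `check_overwrites` has the shape
+    // `BTreeMap<String, Option<PackageId>>`: which existing binary clashes,
+    // and, when recorded in the metadata, which installed package owns it.
+    // For example (hypothetical): { "rg" -> Some(ripgrep v0.8.1) }.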
+ { + let metadata = metadata(config, root)?; + let list = read_crate_list(&metadata)?; + let dst = metadata.parent().join("bin"); + check_overwrites(&dst, pkg, &opts.filter, &list, force)?; + } + + let compile = + ops::compile_ws(&ws, Some(source), opts, Arc::new(DefaultExecutor)).chain_err(|| { + if let Some(td) = td_opt.take() { + // preserve the temporary directory, so the user can inspect it + td.into_path(); + } + + format_err!( + "failed to compile `{}`, intermediate artifacts can be \ + found at `{}`", + pkg, + ws.target_dir().display() + ) + })?; + let binaries: Vec<(&str, &Path)> = compile + .binaries + .iter() + .map(|bin| { + let name = bin.file_name().unwrap(); + if let Some(s) = name.to_str() { + Ok((s, bin.as_ref())) + } else { + bail!("Binary `{:?}` name can't be serialized into string", name) + } + }) + .collect::>()?; + if binaries.is_empty() { + bail!( + "no binaries are available for install using the selected \ + features" + ); + } + + let metadata = metadata(config, root)?; + let mut list = read_crate_list(&metadata)?; + let dst = metadata.parent().join("bin"); + let duplicates = check_overwrites(&dst, pkg, &opts.filter, &list, force)?; + + fs::create_dir_all(&dst)?; + + // Copy all binaries to a temporary directory under `dst` first, catching + // some failure modes (e.g. out of space) before touching the existing + // binaries. This directory will get cleaned up via RAII. + let staging_dir = TempFileBuilder::new() + .prefix("cargo-install") + .tempdir_in(&dst)?; + for &(bin, src) in binaries.iter() { + let dst = staging_dir.path().join(bin); + // Try to move if `target_dir` is transient. + if !source_id.is_path() && fs::rename(src, &dst).is_ok() { + continue; + } + fs::copy(src, &dst).chain_err(|| { + format_err!("failed to copy `{}` to `{}`", src.display(), dst.display()) + })?; + } + + let (to_replace, to_install): (Vec<&str>, Vec<&str>) = binaries + .iter() + .map(|&(bin, _)| bin) + .partition(|&bin| duplicates.contains_key(bin)); + + let mut installed = Transaction { bins: Vec::new() }; + + // Move the temporary copies into `dst` starting with new binaries. + for bin in to_install.iter() { + let src = staging_dir.path().join(bin); + let dst = dst.join(bin); + config.shell().status("Installing", dst.display())?; + fs::rename(&src, &dst).chain_err(|| { + format_err!("failed to move `{}` to `{}`", src.display(), dst.display()) + })?; + installed.bins.push(dst); + } + + // Repeat for binaries which replace existing ones but don't pop the error + // up until after updating metadata. + let mut replaced_names = Vec::new(); + let result = { + let mut try_install = || -> CargoResult<()> { + for &bin in to_replace.iter() { + let src = staging_dir.path().join(bin); + let dst = dst.join(bin); + config.shell().status("Replacing", dst.display())?; + fs::rename(&src, &dst).chain_err(|| { + format_err!("failed to move `{}` to `{}`", src.display(), dst.display()) + })?; + replaced_names.push(bin); + } + Ok(()) + }; + try_install() + }; + + // Update records of replaced binaries. + for &bin in replaced_names.iter() { + if let Some(&Some(ref p)) = duplicates.get(bin) { + if let Some(set) = list.v1.get_mut(p) { + set.remove(bin); + } + } + list.v1 + .entry(pkg.package_id().clone()) + .or_insert_with(BTreeSet::new) + .insert(bin.to_string()); + } + + // Remove empty metadata lines. 
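+    // The listing being edited is the `.crates.toml` metadata file, e.g.
+    // (hypothetical contents):
+    //
+    //     [v1]
+    //     "ripgrep 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = ["rg"]
+    //
+    // A package whose binary set has just been emptied (every bin replaced
+    // by another package) would otherwise linger with `= []`, so it is
+    // pruned here.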
+ let pkgs = list.v1 + .iter() + .filter_map(|(p, set)| { + if set.is_empty() { + Some(p.clone()) + } else { + None + } + }) + .collect::>(); + for p in pkgs.iter() { + list.v1.remove(p); + } + + // If installation was successful record newly installed binaries. + if result.is_ok() { + list.v1 + .entry(pkg.package_id().clone()) + .or_insert_with(BTreeSet::new) + .extend(to_install.iter().map(|s| s.to_string())); + } + + let write_result = write_crate_list(&metadata, list); + match write_result { + // Replacement error (if any) isn't actually caused by write error + // but this seems to be the only way to show both. + Err(err) => result.chain_err(|| err)?, + Ok(_) => result?, + } + + // Reaching here means all actions have succeeded. Clean up. + installed.success(); + if needs_cleanup { + // Don't bother grabbing a lock as we're going to blow it all away + // anyway. + let target_dir = ws.target_dir().into_path_unlocked(); + paths::remove_dir_all(&target_dir)?; + } + + Ok(()) +} + +fn select_pkg<'a, T>( + mut source: T, + name: Option<&str>, + vers: Option<&str>, + config: &Config, + needs_update: bool, + list_all: &mut FnMut(&mut T) -> CargoResult>, +) -> CargoResult<(Package, Box)> +where + T: Source + 'a, +{ + if needs_update { + source.update()?; + } + + match name { + Some(name) => { + let vers = match vers { + Some(v) => { + // If the version begins with character <, >, =, ^, ~ parse it as a + // version range, otherwise parse it as a specific version + let first = v.chars() + .nth(0) + .ok_or_else(|| format_err!("no version provided for the `--vers` flag"))?; + + match first { + '<' | '>' | '=' | '^' | '~' => match v.parse::() { + Ok(v) => Some(v.to_string()), + Err(_) => bail!( + "the `--vers` provided, `{}`, is \ + not a valid semver version requirement\n\n + Please have a look at \ + http://doc.crates.io/specifying-dependencies.html \ + for the correct format", + v + ), + }, + _ => match v.parse::() { + Ok(v) => Some(format!("={}", v)), + Err(_) => { + let mut msg = format!( + "\ + the `--vers` provided, `{}`, is \ + not a valid semver version\n\n\ + historically Cargo treated this \ + as a semver version requirement \ + accidentally\nand will continue \ + to do so, but this behavior \ + will be removed eventually", + v + ); + + // If it is not a valid version but it is a valid version + // requirement, add a note to the warning + if v.parse::().is_ok() { + msg.push_str(&format!( + "\nif you want to specify semver range, \ + add an explicit qualifier, like ^{}", + v + )); + } + config.shell().warn(&msg)?; + Some(v.to_string()) + } + }, + } + } + None => None, + }; + let vers = vers.as_ref().map(|s| &**s); + let dep = Dependency::parse_no_deprecated( + name, + Some(vers.unwrap_or("*")), + source.source_id(), + )?; + let deps = source.query_vec(&dep)?; + match deps.iter().map(|p| p.package_id()).max() { + Some(pkgid) => { + let pkg = source.download(pkgid)?; + Ok((pkg, Box::new(source))) + } + None => { + let vers_info = vers.map(|v| format!(" with version `{}`", v)) + .unwrap_or_default(); + Err(format_err!( + "could not find `{}` in {}{}", + name, + source.source_id(), + vers_info + )) + } + } + } + None => { + let candidates = list_all(&mut source)?; + let binaries = candidates + .iter() + .filter(|cand| cand.targets().iter().filter(|t| t.is_bin()).count() > 0); + let examples = candidates + .iter() + .filter(|cand| cand.targets().iter().filter(|t| t.is_example()).count() > 0); + let pkg = match one(binaries, |v| multi_err("binaries", v))? 
{ + Some(p) => p, + None => match one(examples, |v| multi_err("examples", v))? { + Some(p) => p, + None => bail!( + "no packages found with binaries or \ + examples" + ), + }, + }; + return Ok((pkg.clone(), Box::new(source))); + + fn multi_err(kind: &str, mut pkgs: Vec<&Package>) -> String { + pkgs.sort_by(|a, b| a.name().cmp(&b.name())); + format!( + "multiple packages with {} found: {}", + kind, + pkgs.iter() + .map(|p| p.name().as_str()) + .collect::>() + .join(", ") + ) + } + } + } +} + +fn one(mut i: I, f: F) -> CargoResult> +where + I: Iterator, + F: FnOnce(Vec) -> String, +{ + match (i.next(), i.next()) { + (Some(i1), Some(i2)) => { + let mut v = vec![i1, i2]; + v.extend(i); + Err(format_err!("{}", f(v))) + } + (Some(i), None) => Ok(Some(i)), + (None, _) => Ok(None), + } +} + +fn check_overwrites( + dst: &Path, + pkg: &Package, + filter: &ops::CompileFilter, + prev: &CrateListingV1, + force: bool, +) -> CargoResult>> { + // If explicit --bin or --example flags were passed then those'll + // get checked during cargo_compile, we only care about the "build + // everything" case here + if !filter.is_specific() && !pkg.targets().iter().any(|t| t.is_bin()) { + bail!("specified package has no binaries") + } + let duplicates = find_duplicates(dst, pkg, filter, prev); + if force || duplicates.is_empty() { + return Ok(duplicates); + } + // Format the error message. + let mut msg = String::new(); + for (bin, p) in duplicates.iter() { + msg.push_str(&format!("binary `{}` already exists in destination", bin)); + if let Some(p) = p.as_ref() { + msg.push_str(&format!(" as part of `{}`\n", p)); + } else { + msg.push_str("\n"); + } + } + msg.push_str("Add --force to overwrite"); + Err(format_err!("{}", msg)) +} + +fn find_duplicates( + dst: &Path, + pkg: &Package, + filter: &ops::CompileFilter, + prev: &CrateListingV1, +) -> BTreeMap> { + let check = |name: String| { + // Need to provide type, works around Rust Issue #93349 + let name = format!("{}{}", name, env::consts::EXE_SUFFIX); + if fs::metadata(dst.join(&name)).is_err() { + None + } else if let Some((p, _)) = prev.v1.iter().find(|&(_, v)| v.contains(&name)) { + Some((name, Some(p.clone()))) + } else { + Some((name, None)) + } + }; + match *filter { + CompileFilter::Default { .. } => pkg.targets() + .iter() + .filter(|t| t.is_bin()) + .filter_map(|t| check(t.name().to_string())) + .collect(), + CompileFilter::Only { + ref bins, + ref examples, + .. 
+ } => { + let all_bins: Vec = bins.try_collect().unwrap_or_else(|| { + pkg.targets() + .iter() + .filter(|t| t.is_bin()) + .map(|t| t.name().to_string()) + .collect() + }); + let all_examples: Vec = examples.try_collect().unwrap_or_else(|| { + pkg.targets() + .iter() + .filter(|t| t.is_bin_example()) + .map(|t| t.name().to_string()) + .collect() + }); + + all_bins + .iter() + .chain(all_examples.iter()) + .filter_map(|t| check(t.clone())) + .collect::>>() + } + } +} + +fn read_crate_list(file: &FileLock) -> CargoResult { + let listing = (|| -> CargoResult<_> { + let mut contents = String::new(); + file.file().read_to_string(&mut contents)?; + let listing = + toml::from_str(&contents).chain_err(|| internal("invalid TOML found for metadata"))?; + match listing { + CrateListing::V1(v1) => Ok(v1), + CrateListing::Empty(_) => Ok(CrateListingV1 { + v1: BTreeMap::new(), + }), + } + })() + .chain_err(|| { + format_err!( + "failed to parse crate metadata at `{}`", + file.path().to_string_lossy() + ) + })?; + Ok(listing) +} + +fn write_crate_list(file: &FileLock, listing: CrateListingV1) -> CargoResult<()> { + (|| -> CargoResult<_> { + let mut file = file.file(); + file.seek(SeekFrom::Start(0))?; + file.set_len(0)?; + let data = toml::to_string(&CrateListing::V1(listing))?; + file.write_all(data.as_bytes())?; + Ok(()) + })() + .chain_err(|| { + format_err!( + "failed to write crate metadata at `{}`", + file.path().to_string_lossy() + ) + })?; + Ok(()) +} + +pub fn install_list(dst: Option<&str>, config: &Config) -> CargoResult<()> { + let dst = resolve_root(dst, config)?; + let dst = metadata(config, &dst)?; + let list = read_crate_list(&dst)?; + for (k, v) in list.v1.iter() { + println!("{}:", k); + for bin in v { + println!(" {}", bin); + } + } + Ok(()) +} + +pub fn uninstall( + root: Option<&str>, + specs: Vec<&str>, + bins: &[String], + config: &Config, +) -> CargoResult<()> { + if specs.len() > 1 && !bins.is_empty() { + bail!("A binary can only be associated with a single installed package, specifying multiple specs with --bin is redundant."); + } + + let root = resolve_root(root, config)?; + let scheduled_error = if specs.len() == 1 { + uninstall_one(&root, specs[0], bins, config)?; + false + } else { + let mut succeeded = vec![]; + let mut failed = vec![]; + for spec in specs { + let root = root.clone(); + match uninstall_one(&root, spec, bins, config) { + Ok(()) => succeeded.push(spec), + Err(e) => { + ::handle_error(e, &mut config.shell()); + failed.push(spec) + } + } + } + + let mut summary = vec![]; + if !succeeded.is_empty() { + summary.push(format!( + "Successfully uninstalled {}!", + succeeded.join(", ") + )); + } + if !failed.is_empty() { + summary.push(format!( + "Failed to uninstall {} (see error(s) above).", + failed.join(", ") + )); + } + + if !succeeded.is_empty() || !failed.is_empty() { + config.shell().status("Summary", summary.join(" "))?; + } + + !failed.is_empty() + }; + + if scheduled_error { + bail!("some packages failed to uninstall"); + } + + Ok(()) +} + +pub fn uninstall_one( + root: &Filesystem, + spec: &str, + bins: &[String], + config: &Config, +) -> CargoResult<()> { + let crate_metadata = metadata(config, root)?; + let mut metadata = read_crate_list(&crate_metadata)?; + let mut to_remove = Vec::new(); + { + let result = PackageIdSpec::query_str(spec, metadata.v1.keys())?.clone(); + let mut installed = match metadata.v1.entry(result.clone()) { + Entry::Occupied(e) => e, + Entry::Vacant(..) 
=> panic!("entry not found: {}", result), + }; + let dst = crate_metadata.parent().join("bin"); + for bin in installed.get() { + let bin = dst.join(bin); + if fs::metadata(&bin).is_err() { + bail!( + "corrupt metadata, `{}` does not exist when it should", + bin.display() + ) + } + } + + let bins = bins.iter() + .map(|s| { + if s.ends_with(env::consts::EXE_SUFFIX) { + s.to_string() + } else { + format!("{}{}", s, env::consts::EXE_SUFFIX) + } + }) + .collect::>(); + + for bin in bins.iter() { + if !installed.get().contains(bin) { + bail!("binary `{}` not installed as part of `{}`", bin, result) + } + } + + if bins.is_empty() { + to_remove.extend(installed.get().iter().map(|b| dst.join(b))); + installed.get_mut().clear(); + } else { + for bin in bins.iter() { + to_remove.push(dst.join(bin)); + installed.get_mut().remove(bin); + } + } + if installed.get().is_empty() { + installed.remove(); + } + } + write_crate_list(&crate_metadata, metadata)?; + for bin in to_remove { + config.shell().status("Removing", bin.display())?; + paths::remove_file(bin)?; + } + + Ok(()) +} + +fn metadata(config: &Config, root: &Filesystem) -> CargoResult { + root.open_rw(Path::new(".crates.toml"), config, "crate metadata") +} + +fn resolve_root(flag: Option<&str>, config: &Config) -> CargoResult { + let config_root = config.get_path("install.root")?; + Ok(flag.map(PathBuf::from) + .or_else(|| env::var_os("CARGO_INSTALL_ROOT").map(PathBuf::from)) + .or_else(move || config_root.map(|v| v.val)) + .map(Filesystem::new) + .unwrap_or_else(|| config.home().clone())) +} diff --git a/src/cargo/ops/cargo_new.rs b/src/cargo/ops/cargo_new.rs new file mode 100644 index 000000000..8bd99ca34 --- /dev/null +++ b/src/cargo/ops/cargo_new.rs @@ -0,0 +1,672 @@ +use std::collections::BTreeMap; +use std::env; +use std::fs; +use std::fmt; +use std::path::{Path, PathBuf}; + +use git2::Config as GitConfig; +use git2::Repository as GitRepository; + +use core::{compiler, Workspace}; +use util::{internal, FossilRepo, GitRepo, HgRepo, PijulRepo}; +use util::{paths, Config}; +use util::errors::{CargoResult, CargoResultExt}; + +use toml; + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum VersionControl { + Git, + Hg, + Pijul, + Fossil, + NoVcs, +} + +#[derive(Debug)] +pub struct NewOptions { + pub version_control: Option, + pub kind: NewProjectKind, + /// Absolute path to the directory for the new project + pub path: PathBuf, + pub name: Option, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum NewProjectKind { + Bin, + Lib, +} + +impl NewProjectKind { + fn is_bin(&self) -> bool { + *self == NewProjectKind::Bin + } +} + +impl fmt::Display for NewProjectKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + NewProjectKind::Bin => "binary (application)", + NewProjectKind::Lib => "library", + }.fmt(f) + } +} + +struct SourceFileInformation { + relative_path: String, + target_name: String, + bin: bool, +} + +struct MkOptions<'a> { + version_control: Option, + path: &'a Path, + name: &'a str, + source_files: Vec, + bin: bool, +} + +impl NewOptions { + pub fn new( + version_control: Option, + bin: bool, + lib: bool, + path: PathBuf, + name: Option, + ) -> CargoResult { + let kind = match (bin, lib) { + (true, true) => bail!("can't specify both lib and binary outputs"), + (false, true) => NewProjectKind::Lib, + // default to bin + (_, false) => NewProjectKind::Bin, + }; + + let opts = NewOptions { + version_control, + kind, + path, + name, + }; + Ok(opts) + } +} + +struct CargoNewConfig { + name: Option, + email: 
Option, + version_control: Option, +} + +fn get_name<'a>(path: &'a Path, opts: &'a NewOptions) -> CargoResult<&'a str> { + if let Some(ref name) = opts.name { + return Ok(name); + } + + let file_name = path.file_name().ok_or_else(|| { + format_err!( + "cannot auto-detect project name from path {:?} ; use --name to override", + path.as_os_str() + ) + })?; + + file_name.to_str().ok_or_else(|| { + format_err!( + "cannot create project with a non-unicode name: {:?}", + file_name + ) + }) +} + +fn check_name(name: &str, opts: &NewOptions) -> CargoResult<()> { + // If --name is already used to override, no point in suggesting it + // again as a fix. + let name_help = match opts.name { + Some(_) => "", + None => "\nuse --name to override crate name", + }; + + // Ban keywords + test list found at + // https://doc.rust-lang.org/grammar.html#keywords + let blacklist = [ + "abstract", "alignof", "as", "become", "box", "break", "const", "continue", "crate", "do", + "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in", "let", "loop", + "macro", "match", "mod", "move", "mut", "offsetof", "override", "priv", "proc", "pub", + "pure", "ref", "return", "self", "sizeof", "static", "struct", "super", "test", "trait", + "true", "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield", + ]; + if blacklist.contains(&name) || (opts.kind.is_bin() && compiler::is_bad_artifact_name(name)) { + bail!( + "The name `{}` cannot be used as a crate name{}", + name, + name_help + ) + } + + if let Some(ref c) = name.chars().nth(0) { + if c.is_digit(10) { + bail!( + "Package names starting with a digit cannot be used as a crate name{}", + name_help + ) + } + } + + for c in name.chars() { + if c.is_alphanumeric() { + continue; + } + if c == '_' || c == '-' { + continue; + } + bail!( + "Invalid character `{}` in crate name: `{}`{}", + c, + name, + name_help + ) + } + Ok(()) +} + +fn detect_source_paths_and_types( + project_path: &Path, + project_name: &str, + detected_files: &mut Vec, +) -> CargoResult<()> { + let path = project_path; + let name = project_name; + + enum H { + Bin, + Lib, + Detect, + } + + struct Test { + proposed_path: String, + handling: H, + } + + let tests = vec![ + Test { + proposed_path: format!("src/main.rs"), + handling: H::Bin, + }, + Test { + proposed_path: format!("main.rs"), + handling: H::Bin, + }, + Test { + proposed_path: format!("src/{}.rs", name), + handling: H::Detect, + }, + Test { + proposed_path: format!("{}.rs", name), + handling: H::Detect, + }, + Test { + proposed_path: format!("src/lib.rs"), + handling: H::Lib, + }, + Test { + proposed_path: format!("lib.rs"), + handling: H::Lib, + }, + ]; + + for i in tests { + let pp = i.proposed_path; + + // path/pp does not exist or is not a file + if !fs::metadata(&path.join(&pp)) + .map(|x| x.is_file()) + .unwrap_or(false) + { + continue; + } + + let sfi = match i.handling { + H::Bin => SourceFileInformation { + relative_path: pp, + target_name: project_name.to_string(), + bin: true, + }, + H::Lib => SourceFileInformation { + relative_path: pp, + target_name: project_name.to_string(), + bin: false, + }, + H::Detect => { + let content = paths::read(&path.join(pp.clone()))?; + let isbin = content.contains("fn main"); + SourceFileInformation { + relative_path: pp, + target_name: project_name.to_string(), + bin: isbin, + } + } + }; + detected_files.push(sfi); + } + + // Check for duplicate lib attempt + + let mut previous_lib_relpath: Option<&str> = None; + let mut duplicates_checker: BTreeMap<&str, 
&SourceFileInformation> = BTreeMap::new(); + + for i in detected_files { + if i.bin { + if let Some(x) = BTreeMap::get::(&duplicates_checker, i.target_name.as_ref()) { + bail!( + "\ +multiple possible binary sources found: + {} + {} +cannot automatically generate Cargo.toml as the main target would be ambiguous", + &x.relative_path, + &i.relative_path + ); + } + duplicates_checker.insert(i.target_name.as_ref(), i); + } else { + if let Some(plp) = previous_lib_relpath { + bail!( + "cannot have a project with \ + multiple libraries, \ + found both `{}` and `{}`", + plp, + i.relative_path + ) + } + previous_lib_relpath = Some(&i.relative_path); + } + } + + Ok(()) +} + +fn plan_new_source_file(bin: bool, project_name: String) -> SourceFileInformation { + if bin { + SourceFileInformation { + relative_path: "src/main.rs".to_string(), + target_name: project_name, + bin: true, + } + } else { + SourceFileInformation { + relative_path: "src/lib.rs".to_string(), + target_name: project_name, + bin: false, + } + } +} + +pub fn new(opts: &NewOptions, config: &Config) -> CargoResult<()> { + let path = &opts.path; + if fs::metadata(path).is_ok() { + bail!( + "destination `{}` already exists\n\n\ + Use `cargo init` to initialize the directory", + path.display() + ) + } + + let name = get_name(path, opts)?; + check_name(name, opts)?; + + let mkopts = MkOptions { + version_control: opts.version_control, + path, + name, + source_files: vec![plan_new_source_file(opts.kind.is_bin(), name.to_string())], + bin: opts.kind.is_bin(), + }; + + mk(config, &mkopts).chain_err(|| { + format_err!( + "Failed to create project `{}` at `{}`", + name, + path.display() + ) + })?; + Ok(()) +} + +pub fn init(opts: &NewOptions, config: &Config) -> CargoResult<()> { + let path = &opts.path; + + if fs::metadata(&path.join("Cargo.toml")).is_ok() { + bail!("`cargo init` cannot be run on existing Cargo projects") + } + + let name = get_name(path, opts)?; + check_name(name, opts)?; + + let mut src_paths_types = vec![]; + + detect_source_paths_and_types(path, name, &mut src_paths_types)?; + + if src_paths_types.is_empty() { + src_paths_types.push(plan_new_source_file(opts.kind.is_bin(), name.to_string())); + } else { + // --bin option may be ignored if lib.rs or src/lib.rs present + // Maybe when doing `cargo init --bin` inside a library project stub, + // user may mean "initialize for library, but also add binary target" + } + + let mut version_control = opts.version_control; + + if version_control == None { + let mut num_detected_vsces = 0; + + if fs::metadata(&path.join(".git")).is_ok() { + version_control = Some(VersionControl::Git); + num_detected_vsces += 1; + } + + if fs::metadata(&path.join(".hg")).is_ok() { + version_control = Some(VersionControl::Hg); + num_detected_vsces += 1; + } + + if fs::metadata(&path.join(".pijul")).is_ok() { + version_control = Some(VersionControl::Pijul); + num_detected_vsces += 1; + } + + if fs::metadata(&path.join(".fossil")).is_ok() { + version_control = Some(VersionControl::Fossil); + num_detected_vsces += 1; + } + + // if none exists, maybe create git, like in `cargo new` + + if num_detected_vsces > 1 { + bail!( + "more than one of .hg, .git, .pijul, .fossil configurations \ + found and the ignore file can't be filled in as \ + a result. 
specify --vcs to override detection" + ); + } + } + + let mkopts = MkOptions { + version_control, + path, + name, + bin: src_paths_types.iter().any(|x| x.bin), + source_files: src_paths_types, + }; + + mk(config, &mkopts).chain_err(|| { + format_err!( + "Failed to create project `{}` at `{}`", + name, + path.display() + ) + })?; + Ok(()) +} + +fn existing_vcs_repo(path: &Path, cwd: &Path) -> bool { + GitRepo::discover(path, cwd).is_ok() || HgRepo::discover(path, cwd).is_ok() +} + +fn mk(config: &Config, opts: &MkOptions) -> CargoResult<()> { + let path = opts.path; + let name = opts.name; + let cfg = global_config(config)?; + // Please ensure that ignore and hgignore are in sync. + let ignore = [ + "/target\n", + "**/*.rs.bk\n", + if !opts.bin { "Cargo.lock\n" } else { "" }, + ].concat(); + // Mercurial glob ignores can't be rooted, so just sticking a 'syntax: glob' at the top of the + // file will exclude too much. Instead, use regexp-based ignores. See 'hg help ignore' for + // more. + let hgignore = [ + "^target/\n", + "glob:*.rs.bk\n", + if !opts.bin { "glob:Cargo.lock\n" } else { "" }, + ].concat(); + + let vcs = opts.version_control.unwrap_or_else(|| { + let in_existing_vcs = existing_vcs_repo(path.parent().unwrap_or(path), config.cwd()); + match (cfg.version_control, in_existing_vcs) { + (None, false) => VersionControl::Git, + (Some(opt), false) => opt, + (_, true) => VersionControl::NoVcs, + } + }); + + match vcs { + VersionControl::Git => { + if !path.join(".git").exists() { + GitRepo::init(path, config.cwd())?; + } + let ignore = if path.join(".gitignore").exists() { + format!("\n{}", ignore) + } else { + ignore + }; + paths::append(&path.join(".gitignore"), ignore.as_bytes())?; + } + VersionControl::Hg => { + if !path.join(".hg").exists() { + HgRepo::init(path, config.cwd())?; + } + let hgignore = if path.join(".hgignore").exists() { + format!("\n{}", hgignore) + } else { + hgignore + }; + paths::append(&path.join(".hgignore"), hgignore.as_bytes())?; + } + VersionControl::Pijul => { + if !path.join(".pijul").exists() { + PijulRepo::init(path, config.cwd())?; + } + let ignore = if path.join(".ignore").exists() { + format!("\n{}", ignore) + } else { + ignore + }; + paths::append(&path.join(".ignore"), ignore.as_bytes())?; + } + VersionControl::Fossil => { + if path.join(".fossil").exists() { + FossilRepo::init(path, config.cwd())?; + } + } + VersionControl::NoVcs => { + fs::create_dir_all(path)?; + } + }; + + let (author_name, email) = discover_author()?; + // Hoo boy, sure glad we've got exhaustiveness checking behind us. 
+ let author = match (cfg.name, cfg.email, author_name, email) { + (Some(name), Some(email), _, _) + | (Some(name), None, _, Some(email)) + | (None, Some(email), name, _) + | (None, None, name, Some(email)) => format!("{} <{}>", name, email), + (Some(name), None, _, None) | (None, None, name, None) => name, + }; + + let mut cargotoml_path_specifier = String::new(); + + // Calculate what [lib] and [[bin]]s do we need to append to Cargo.toml + + for i in &opts.source_files { + if i.bin { + if i.relative_path != "src/main.rs" { + cargotoml_path_specifier.push_str(&format!( + r#" +[[bin]] +name = "{}" +path = {} +"#, + i.target_name, + toml::Value::String(i.relative_path.clone()) + )); + } + } else if i.relative_path != "src/lib.rs" { + cargotoml_path_specifier.push_str(&format!( + r#" +[lib] +name = "{}" +path = {} +"#, + i.target_name, + toml::Value::String(i.relative_path.clone()) + )); + } + } + + // Create Cargo.toml file with necessary [lib] and [[bin]] sections, if needed + + paths::write( + &path.join("Cargo.toml"), + format!( + r#"[package] +name = "{}" +version = "0.1.0" +authors = [{}] + +[dependencies] +{}"#, + name, + toml::Value::String(author), + cargotoml_path_specifier + ).as_bytes(), + )?; + + // Create all specified source files + // (with respective parent directories) + // if they are don't exist + + for i in &opts.source_files { + let path_of_source_file = path.join(i.relative_path.clone()); + + if let Some(src_dir) = path_of_source_file.parent() { + fs::create_dir_all(src_dir)?; + } + + let default_file_content: &[u8] = if i.bin { + b"\ +fn main() { + println!(\"Hello, world!\"); +} +" + } else { + b"\ +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + assert_eq!(2 + 2, 4); + } +} +" + }; + + if !fs::metadata(&path_of_source_file) + .map(|x| x.is_file()) + .unwrap_or(false) + { + paths::write(&path_of_source_file, default_file_content)?; + } + } + + if let Err(e) = Workspace::new(&path.join("Cargo.toml"), config) { + let msg = format!( + "compiling this new crate may not work due to invalid \ + workspace configuration\n\n{}", + e + ); + config.shell().warn(msg)?; + } + + Ok(()) +} + +fn get_environment_variable(variables: &[&str]) -> Option { + variables.iter().filter_map(|var| env::var(var).ok()).next() +} + +fn discover_author() -> CargoResult<(String, Option)> { + let cwd = env::current_dir()?; + let git_config = if let Ok(repo) = GitRepository::discover(&cwd) { + repo.config() + .ok() + .or_else(|| GitConfig::open_default().ok()) + } else { + GitConfig::open_default().ok() + }; + let git_config = git_config.as_ref(); + let name_variables = [ + "CARGO_NAME", + "GIT_AUTHOR_NAME", + "GIT_COMMITTER_NAME", + "USER", + "USERNAME", + "NAME", + ]; + let name = get_environment_variable(&name_variables[0..3]) + .or_else(|| git_config.and_then(|g| g.get_string("user.name").ok())) + .or_else(|| get_environment_variable(&name_variables[3..])); + + let name = match name { + Some(name) => name, + None => { + let username_var = if cfg!(windows) { "USERNAME" } else { "USER" }; + bail!( + "could not determine the current user, please set ${}", + username_var + ) + } + }; + let email_variables = [ + "CARGO_EMAIL", + "GIT_AUTHOR_EMAIL", + "GIT_COMMITTER_EMAIL", + "EMAIL", + ]; + let email = get_environment_variable(&email_variables[0..3]) + .or_else(|| git_config.and_then(|g| g.get_string("user.email").ok())) + .or_else(|| get_environment_variable(&email_variables[3..])); + + let name = name.trim().to_string(); + let email = email.map(|s| s.trim().to_string()); + + Ok((name, 
email))
+}
+
+fn global_config(config: &Config) -> CargoResult<CargoNewConfig> {
+    let name = config.get_string("cargo-new.name")?.map(|s| s.val);
+    let email = config.get_string("cargo-new.email")?.map(|s| s.val);
+    let vcs = config.get_string("cargo-new.vcs")?;
+
+    let vcs = match vcs.as_ref().map(|p| (&p.val[..], &p.definition)) {
+        Some(("git", _)) => Some(VersionControl::Git),
+        Some(("hg", _)) => Some(VersionControl::Hg),
+        Some(("pijul", _)) => Some(VersionControl::Pijul),
+        Some(("none", _)) => Some(VersionControl::NoVcs),
+        Some((s, p)) => {
+            return Err(internal(format!(
+                "invalid configuration for key \
+                 `cargo-new.vcs`, unknown vcs `{}` \
+                 (found in {})",
+                s, p
+            )))
+        }
+        None => None,
+    };
+    Ok(CargoNewConfig {
+        name,
+        email,
+        version_control: vcs,
+    })
+}
diff --git a/src/cargo/ops/cargo_output_metadata.rs b/src/cargo/ops/cargo_output_metadata.rs
new file mode 100644
index 000000000..5cba2d482
--- /dev/null
+++ b/src/cargo/ops/cargo_output_metadata.rs
@@ -0,0 +1,117 @@
+use serde::ser::{self, Serialize};
+
+use core::resolver::Resolve;
+use core::{Package, PackageId, Workspace};
+use ops::{self, Packages};
+use util::CargoResult;
+
+const VERSION: u32 = 1;
+
+pub struct OutputMetadataOptions {
+    pub features: Vec<String>,
+    pub no_default_features: bool,
+    pub all_features: bool,
+    pub no_deps: bool,
+    pub version: u32,
+}
+
+/// Loads the manifest, resolves the project's dependencies to the concrete
+/// versions in use (considering overrides), and writes all dependencies in a
+/// JSON format to stdout.
+pub fn output_metadata(ws: &Workspace, opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
+    if opt.version != VERSION {
+        bail!(
+            "metadata version {} not supported, only {} is currently supported",
+            opt.version,
+            VERSION
+        );
+    }
+    if opt.no_deps {
+        metadata_no_deps(ws, opt)
+    } else {
+        metadata_full(ws, opt)
+    }
+}
+
+fn metadata_no_deps(ws: &Workspace, _opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
+    Ok(ExportInfo {
+        packages: ws.members().cloned().collect(),
+        workspace_members: ws.members().map(|pkg| pkg.package_id().clone()).collect(),
+        resolve: None,
+        target_directory: ws.target_dir().display().to_string(),
+        version: VERSION,
+        workspace_root: ws.root().display().to_string(),
+    })
+}
+
+fn metadata_full(ws: &Workspace, opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
+    let specs = Packages::All.into_package_id_specs(ws)?;
+    let deps = ops::resolve_ws_precisely(
+        ws,
+        None,
+        &opt.features,
+        opt.all_features,
+        opt.no_default_features,
+        &specs,
+    )?;
+    let (packages, resolve) = deps;
+
+    let packages = packages
+        .package_ids()
+        .map(|i| packages.get(i).map(|p| p.clone()))
+        .collect::<CargoResult<Vec<_>>>()?;
+
+    Ok(ExportInfo {
+        packages,
+        workspace_members: ws.members().map(|pkg| pkg.package_id().clone()).collect(),
+        resolve: Some(MetadataResolve {
+            resolve,
+            root: ws.current_opt().map(|pkg| pkg.package_id().clone()),
+        }),
+        target_directory: ws.target_dir().display().to_string(),
+        version: VERSION,
+        workspace_root: ws.root().display().to_string(),
+    })
+}
+
+#[derive(Serialize)]
+pub struct ExportInfo {
+    packages: Vec<Package>,
+    workspace_members: Vec<PackageId>,
+    resolve: Option<MetadataResolve>,
+    target_directory: String,
+    version: u32,
+    workspace_root: String,
+}
+
+/// Newtype wrapper to provide a custom `Serialize` implementation.
+/// The one from lockfile does not fit because it uses a non-standard
+/// format for `PackageId`s
+#[derive(Serialize)]
+struct MetadataResolve {
+    #[serde(rename = "nodes", serialize_with = "serialize_resolve")]
+    resolve: Resolve,
+    root: Option<PackageId>,
+}
+
+fn serialize_resolve<S>(resolve: &Resolve, s: S) -> Result<S::Ok, S::Error>
+where
+    S: ser::Serializer,
+{
+    #[derive(Serialize)]
+    struct Node<'a> {
+        id: &'a PackageId,
+        dependencies: Vec<&'a PackageId>,
+        features: Vec<&'a str>,
+    }
+
+    resolve
+        .iter()
+        .map(|id| Node {
+            id,
+            dependencies: resolve.deps(id).map(|p| p.0).collect(),
+            features: resolve.features_sorted(id),
+        })
+        .collect::<Vec<_>>()
+        .serialize(s)
+}
diff --git a/src/cargo/ops/cargo_package.rs b/src/cargo/ops/cargo_package.rs
new file mode 100644
index 000000000..f9de82868
--- /dev/null
+++ b/src/cargo/ops/cargo_package.rs
@@ -0,0 +1,386 @@
+use std::fs::{self, File};
+use std::io::SeekFrom;
+use std::io::prelude::*;
+use std::path::{self, Path};
+use std::sync::Arc;
+
+use flate2::read::GzDecoder;
+use flate2::{Compression, GzBuilder};
+use git2;
+use tar::{Archive, Builder, EntryType, Header};
+
+use core::{Package, Source, SourceId, Workspace};
+use core::compiler::{BuildConfig, CompileMode, DefaultExecutor};
+use sources::PathSource;
+use util::{self, internal, Config, FileLock};
+use util::paths;
+use util::errors::{CargoResult, CargoResultExt};
+use ops;
+
+pub struct PackageOpts<'cfg> {
+    pub config: &'cfg Config,
+    pub list: bool,
+    pub check_metadata: bool,
+    pub allow_dirty: bool,
+    pub verify: bool,
+    pub jobs: Option<u32>,
+    pub target: Option<String>,
+    pub registry: Option<String>,
+}
+
+pub fn package(ws: &Workspace, opts: &PackageOpts) -> CargoResult<Option<FileLock>> {
+    ops::resolve_ws(ws)?;
+    let pkg = ws.current()?;
+    let config = ws.config();
+
+    let mut src = PathSource::new(pkg.root(), pkg.package_id().source_id(), config);
+    src.update()?;
+
+    if opts.check_metadata {
+        check_metadata(pkg, config)?;
+    }
+
+    verify_dependencies(pkg)?;
+
+    if opts.list {
+        let root = pkg.root();
+        let mut list: Vec<_> = src.list_files(pkg)?
+            .iter()
+            .map(|file| util::without_prefix(file, root).unwrap().to_path_buf())
+            .collect();
+        if include_lockfile(pkg) {
+            list.push("Cargo.lock".into());
+        }
+        list.sort();
+        for file in list.iter() {
+            println!("{}", file.display());
+        }
+        return Ok(None);
+    }
+
+    if !opts.allow_dirty {
+        check_not_dirty(pkg, &src)?;
+    }
+
+    let filename = format!("{}-{}.crate", pkg.name(), pkg.version());
+    let dir = ws.target_dir().join("package");
+    let mut dst = {
+        let tmp = format!(".{}", filename);
+        dir.open_rw(&tmp, config, "package scratch space")?
+    };
+
+    // Package up and test a temporary tarball and only move it to the final
+    // location if it actually passes all our tests. Any previously existing
+    // tarball can be assumed as corrupt or invalid, so we just blow it away if
+    // it exists.
+    config
+        .shell()
+        .status("Packaging", pkg.package_id().to_string())?;
+    dst.file().set_len(0)?;
+    tar(ws, &src, dst.file(), &filename)
+        .chain_err(|| format_err!("failed to prepare local package for uploading"))?;
+    if opts.verify {
+        dst.seek(SeekFrom::Start(0))?;
+        run_verify(ws, &dst, opts).chain_err(|| "failed to verify package tarball")?
+ } + dst.seek(SeekFrom::Start(0))?; + { + let src_path = dst.path(); + let dst_path = dst.parent().join(&filename); + fs::rename(&src_path, &dst_path) + .chain_err(|| "failed to move temporary tarball into final location")?; + } + Ok(Some(dst)) +} + +fn include_lockfile(pkg: &Package) -> bool { + pkg.manifest().publish_lockfile() && pkg.targets().iter().any(|t| t.is_example() || t.is_bin()) +} + +// check that the package has some piece of metadata that a human can +// use to tell what the package is about. +fn check_metadata(pkg: &Package, config: &Config) -> CargoResult<()> { + let md = pkg.manifest().metadata(); + + let mut missing = vec![]; + + macro_rules! lacking { + ($( $($field: ident)||* ),*) => {{ + $( + if $(md.$field.as_ref().map_or(true, |s| s.is_empty()))&&* { + $(missing.push(stringify!($field).replace("_", "-"));)* + } + )* + }} + } + lacking!( + description, + license || license_file, + documentation || homepage || repository + ); + + if !missing.is_empty() { + let mut things = missing[..missing.len() - 1].join(", "); + // things will be empty if and only if length == 1 (i.e. the only case + // to have no `or`). + if !things.is_empty() { + things.push_str(" or "); + } + things.push_str(missing.last().unwrap()); + + config.shell().warn(&format!( + "manifest has no {things}.\n\ + See http://doc.crates.io/manifest.html#package-metadata for more info.", + things = things + ))? + } + Ok(()) +} + +// check that the package dependencies are safe to deploy. +fn verify_dependencies(pkg: &Package) -> CargoResult<()> { + for dep in pkg.dependencies() { + if dep.source_id().is_path() && !dep.specified_req() { + bail!( + "all path dependencies must have a version specified \ + when packaging.\ndependency `{}` does not specify \ + a version.", + dep.name() + ) + } + } + Ok(()) +} + +fn check_not_dirty(p: &Package, src: &PathSource) -> CargoResult<()> { + if let Ok(repo) = git2::Repository::discover(p.root()) { + if let Some(workdir) = repo.workdir() { + debug!( + "found a git repo at {:?}, checking if index present", + workdir + ); + let path = p.manifest_path(); + let path = path.strip_prefix(workdir).unwrap_or(path); + if let Ok(status) = repo.status_file(path) { + if (status & git2::Status::IGNORED).is_empty() { + debug!("Cargo.toml found in repo, checking if dirty"); + return git(p, src, &repo); + } + } + } + } + + // No VCS recognized, we don't know if the directory is dirty or not, so we + // have to assume that it's clean. + return Ok(()); + + fn git(p: &Package, src: &PathSource, repo: &git2::Repository) -> CargoResult<()> { + let workdir = repo.workdir().unwrap(); + let dirty = src.list_files(p)? + .iter() + .filter(|file| { + let relative = file.strip_prefix(workdir).unwrap(); + if let Ok(status) = repo.status_file(relative) { + status != git2::Status::CURRENT + } else { + false + } + }) + .map(|path| { + path.strip_prefix(p.root()) + .unwrap_or(path) + .display() + .to_string() + }) + .collect::>(); + if dirty.is_empty() { + Ok(()) + } else { + bail!( + "{} files in the working directory contain changes that were \ + not yet committed into git:\n\n{}\n\n\ + to proceed despite this, pass the `--allow-dirty` flag", + dirty.len(), + dirty.join("\n") + ) + } + } +} + +fn tar(ws: &Workspace, src: &PathSource, dst: &File, filename: &str) -> CargoResult<()> { + // Prepare the encoder and its header + let filename = Path::new(filename); + let encoder = GzBuilder::new() + .filename(util::path2bytes(filename)?) 
+        .write(dst, Compression::best());
+
+    // Put all package files into a compressed archive
+    let mut ar = Builder::new(encoder);
+    let pkg = ws.current()?;
+    let config = ws.config();
+    let root = pkg.root();
+    for file in src.list_files(pkg)?.iter() {
+        let relative = util::without_prefix(file, root).unwrap();
+        check_filename(relative)?;
+        let relative = relative.to_str().ok_or_else(|| {
+            format_err!("non-utf8 path in source directory: {}", relative.display())
+        })?;
+        config
+            .shell()
+            .verbose(|shell| shell.status("Archiving", &relative))?;
+        let path = format!(
+            "{}-{}{}{}",
+            pkg.name(),
+            pkg.version(),
+            path::MAIN_SEPARATOR,
+            relative
+        );
+
+        // The tar::Builder type by default will build GNU archives, but
+        // unfortunately we force it here to use UStar archives instead. The
+        // UStar format has more limitations on the length of path name that it
+        // can encode, so it's not quite as nice to use.
+        //
+        // Older cargos, however, had a bug where GNU archives were interpreted
+        // as UStar archives. This bug means that if we publish a GNU archive
+        // which has fully filled out metadata it'll be corrupt when unpacked by
+        // older cargos.
+        //
+        // Hopefully in the future after enough cargos have been running around
+        // with the bugfixed tar-rs library we'll be able to switch this over to
+        // GNU archives, but for now we'll just say that you can't encode paths
+        // in archives that are *too* long.
+        //
+        // For an instance of this in the wild, use the tar-rs 0.3.3 library to
+        // unpack the selectors 0.4.0 crate on crates.io. Either that or take a
+        // look at rust-lang/cargo#2326
+        let mut header = Header::new_ustar();
+        header
+            .set_path(&path)
+            .chain_err(|| format!("failed to add to archive: `{}`", relative))?;
+        let mut file = File::open(file)
+            .chain_err(|| format!("failed to open for archiving: `{}`", file.display()))?;
+        let metadata = file.metadata()
+            .chain_err(|| format!("could not learn metadata for: `{}`", relative))?;
+        header.set_metadata(&metadata);
+
+        if relative == "Cargo.toml" {
+            let orig = Path::new(&path).with_file_name("Cargo.toml.orig");
+            header.set_path(&orig)?;
+            header.set_cksum();
+            ar.append(&header, &mut file)
+                .chain_err(|| internal(format!("could not archive source file `{}`", relative)))?;
+
+            let mut header = Header::new_ustar();
+            let toml = pkg.to_registry_toml(ws.config())?;
+            header.set_path(&path)?;
+            header.set_entry_type(EntryType::file());
+            header.set_mode(0o644);
+            header.set_size(toml.len() as u64);
+            header.set_cksum();
+            ar.append(&header, toml.as_bytes())
+                .chain_err(|| internal(format!("could not archive source file `{}`", relative)))?;
+        } else {
+            header.set_cksum();
+            ar.append(&header, &mut file)
+                .chain_err(|| internal(format!("could not archive source file `{}`", relative)))?;
+        }
+    }
+
+    if include_lockfile(pkg) {
+        let toml = paths::read(&ws.root().join("Cargo.lock"))?;
+        let path = format!(
+            "{}-{}{}Cargo.lock",
+            pkg.name(),
+            pkg.version(),
+            path::MAIN_SEPARATOR
+        );
+        let mut header = Header::new_ustar();
+        header.set_path(&path)?;
+        header.set_entry_type(EntryType::file());
+        header.set_mode(0o644);
+        header.set_size(toml.len() as u64);
+        header.set_cksum();
+        ar.append(&header, toml.as_bytes())
+            .chain_err(|| internal("could not archive source file `Cargo.lock`"))?;
+    }
+
+    let encoder = ar.into_inner()?;
+    encoder.finish()?;
+    Ok(())
+}
+
+fn run_verify(ws: &Workspace, tar: &FileLock, opts: &PackageOpts) -> CargoResult<()> {
+    let config = ws.config();
+    let pkg = ws.current()?;
+
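+    // The flow below: unpack the freshly built tarball next to it, synthesize
+    // an ephemeral workspace rooted at the unpacked sources, and run a plain
+    // `build` compile over it to prove the packaged sources are self-contained.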
+    config.shell().status("Verifying", pkg)?;
+
+    let f = GzDecoder::new(tar.file());
+    let dst = tar.parent()
+        .join(&format!("{}-{}", pkg.name(), pkg.version()));
+    if dst.exists() {
+        paths::remove_dir_all(&dst)?;
+    }
+    let mut archive = Archive::new(f);
+    archive.unpack(dst.parent().unwrap())?;
+
+    // Manufacture an ephemeral workspace to ensure that even if the top-level
+    // package has a workspace we can still build our new crate.
+    let id = SourceId::for_path(&dst)?;
+    let mut src = PathSource::new(&dst, &id, ws.config());
+    let new_pkg = src.root_package()?;
+    let ws = Workspace::ephemeral(new_pkg, config, None, true)?;
+
+    ops::compile_ws(
+        &ws,
+        None,
+        &ops::CompileOptions {
+            config,
+            build_config: BuildConfig::new(config, opts.jobs, &opts.target, CompileMode::Build)?,
+            features: Vec::new(),
+            no_default_features: false,
+            all_features: false,
+            spec: ops::Packages::Packages(Vec::new()),
+            filter: ops::CompileFilter::Default {
+                required_features_filterable: true,
+            },
+            target_rustdoc_args: None,
+            target_rustc_args: None,
+            export_dir: None,
+        },
+        Arc::new(DefaultExecutor),
+    )?;
+
+    Ok(())
+}
+
+// It can often be the case that files of a particular name on one platform
+// can't actually be created on another platform. For example files with colons
+// in the name are allowed on Unix but not on Windows.
+//
+// To help out in situations like this, we warn about weird filenames when
+// packaging as a "heads up" that something may not work on other platforms.
+fn check_filename(file: &Path) -> CargoResult<()> {
+    let name = match file.file_name() {
+        Some(name) => name,
+        None => return Ok(()),
+    };
+    let name = match name.to_str() {
+        Some(name) => name,
+        None => bail!(
+            "path does not have a unicode filename which may not unpack \
+             on all platforms: {}",
+            file.display()
+        ),
+    };
+    let bad_chars = ['/', '\\', '<', '>', ':', '"', '|', '?', '*'];
+    if let Some(c) = bad_chars.iter().find(|c| name.contains(**c)) {
+        bail!(
+            "cannot package a filename with a special character `{}`: {}",
+            c,
+            file.display()
+        )
+    }
+    Ok(())
+}
diff --git a/src/cargo/ops/cargo_pkgid.rs b/src/cargo/ops/cargo_pkgid.rs
new file mode 100644
index 000000000..0461bc4c8
--- /dev/null
+++ b/src/cargo/ops/cargo_pkgid.rs
@@ -0,0 +1,16 @@
+use ops;
+use core::{PackageIdSpec, Workspace};
+use util::CargoResult;
+
+pub fn pkgid(ws: &Workspace, spec: Option<&str>) -> CargoResult<PackageIdSpec> {
+    let resolve = match ops::load_pkg_lockfile(ws)? {
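+        // `cargo pkgid` is purely lockfile-driven; without a Cargo.lock there
+        // is nothing to resolve the spec against.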
+        Some(resolve) => resolve,
+        None => bail!("a Cargo.lock must exist for this command"),
+    };
+
+    let pkgid = match spec {
+        Some(spec) => PackageIdSpec::query_str(spec, resolve.iter())?,
+        None => ws.current()?.package_id(),
+    };
+    Ok(PackageIdSpec::from_package_id(pkgid))
+}
diff --git a/src/cargo/ops/cargo_read_manifest.rs b/src/cargo/ops/cargo_read_manifest.rs
new file mode 100644
index 000000000..aefe5b0ee
--- /dev/null
+++ b/src/cargo/ops/cargo_read_manifest.rs
@@ -0,0 +1,199 @@
+use std::collections::{HashMap, HashSet};
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+
+use core::{EitherManifest, Package, PackageId, SourceId};
+use util::{self, Config};
+use util::errors::{CargoError, CargoResult};
+use util::important_paths::find_project_manifest_exact;
+use util::toml::read_manifest;
+
+pub fn read_package(
+    path: &Path,
+    source_id: &SourceId,
+    config: &Config,
+) -> CargoResult<(Package, Vec<PathBuf>)> {
+    trace!(
+        "read_package; path={}; source-id={}",
+        path.display(),
+        source_id
+    );
+    let (manifest, nested) = read_manifest(path, source_id, config)?;
+    let manifest = match manifest {
+        EitherManifest::Real(manifest) => manifest,
+        EitherManifest::Virtual(..) => bail!(
+            "found a virtual manifest at `{}` instead of a package \
+             manifest",
+            path.display()
+        ),
+    };
+
+    Ok((Package::new(manifest, path), nested))
+}
+
+pub fn read_packages(
+    path: &Path,
+    source_id: &SourceId,
+    config: &Config,
+) -> CargoResult<Vec<Package>> {
+    let mut all_packages = HashMap::new();
+    let mut visited = HashSet::<PathBuf>::new();
+    let mut errors = Vec::<CargoError>::new();
+
+    trace!(
+        "looking for root package: {}, source_id={}",
+        path.display(),
+        source_id
+    );
+
+    walk(path, &mut |dir| {
+        trace!("looking for child package: {}", dir.display());
+
+        // Don't recurse into hidden/dot directories unless we're at the toplevel
+        if dir != path {
+            let name = dir.file_name().and_then(|s| s.to_str());
+            if name.map(|s| s.starts_with('.')) == Some(true) {
+                return Ok(false);
+            }
+
+            // Don't automatically discover packages across git submodules
+            if fs::metadata(&dir.join(".git")).is_ok() {
+                return Ok(false);
+            }
+        }
+
+        // Don't ever look at target directories
+        if dir.file_name().and_then(|s| s.to_str()) == Some("target")
+            && has_manifest(dir.parent().unwrap())
+        {
+            return Ok(false);
+        }
+
+        if has_manifest(dir) {
+            read_nested_packages(
+                dir,
+                &mut all_packages,
+                source_id,
+                config,
+                &mut visited,
+                &mut errors,
+            )?;
+        }
+        Ok(true)
+    })?;
+
+    if all_packages.is_empty() {
+        match errors.pop() {
+            Some(err) => Err(err),
+            None => Err(format_err!(
+                "Could not find Cargo.toml in `{}`",
+                path.display()
+            )),
+        }
+    } else {
+        Ok(all_packages.into_iter().map(|(_, v)| v).collect())
+    }
+}
+
+fn walk(path: &Path, callback: &mut FnMut(&Path) -> CargoResult<bool>) -> CargoResult<()> {
+    if !callback(path)? {
+        trace!("not processing {}", path.display());
+        return Ok(());
+    }
+
+    // Ignore any permission denied errors because temporary directories
+    // can often have some weird permissions on them.
+    let dirs = match fs::read_dir(path) {
+        Ok(dirs) => dirs,
+        Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => return Ok(()),
+        Err(e) => {
+            let cx = format!("failed to read directory `{}`", path.display());
+            let e = CargoError::from(e);
+            return Err(e.context(cx).into());
+        }
+    };
+    for dir in dirs {
+        let dir = dir?;
+        if dir.file_type()?.is_dir() {
+            walk(&dir.path(), callback)?;
+        }
+    }
+    Ok(())
+}
+
+fn has_manifest(path: &Path) -> bool {
+    find_project_manifest_exact(path, "Cargo.toml").is_ok()
+}
+
+fn read_nested_packages(
+    path: &Path,
+    all_packages: &mut HashMap<PackageId, Package>,
+    source_id: &SourceId,
+    config: &Config,
+    visited: &mut HashSet<PathBuf>,
+    errors: &mut Vec<CargoError>,
+) -> CargoResult<()> {
+    if !visited.insert(path.to_path_buf()) {
+        return Ok(());
+    }
+
+    let manifest_path = find_project_manifest_exact(path, "Cargo.toml")?;
+
+    let (manifest, nested) = match read_manifest(&manifest_path, source_id, config) {
+        Err(err) => {
+            // Ignore malformed manifests found on git repositories.
+            //
+            // A git source will try to find and read all manifests in the
+            // repository, but since it's not possible to exclude folders from
+            // this search it's safer to ignore malformed manifests rather than
+            // fail the whole operation.
+            //
+            // TODO: Add a way to exclude folders?
+            info!(
+                "skipping malformed package found at `{}`",
+                path.to_string_lossy()
+            );
+            errors.push(err);
+            return Ok(());
+        }
+        Ok(tuple) => tuple,
+    };
+
+    let manifest = match manifest {
+        EitherManifest::Real(manifest) => manifest,
+        EitherManifest::Virtual(..) => return Ok(()),
+    };
+    let pkg = Package::new(manifest, &manifest_path);
+
+    let pkg_id = pkg.package_id().clone();
+    use std::collections::hash_map::Entry;
+    match all_packages.entry(pkg_id) {
+        Entry::Vacant(v) => {
+            v.insert(pkg);
+        }
+        Entry::Occupied(_) => {
+            info!(
+                "skipping nested package `{}` found at `{}`",
+                pkg.name(),
+                path.to_string_lossy()
+            );
+        }
+    }
+
+    // Registry sources are not allowed to have `path=` dependencies because
+    // they're all translated to actual registry dependencies.
+    //
+    // We normalize the path here to ensure that we don't infinitely walk around
+    // looking for crates. By normalizing we ensure that we visit this crate at
+    // most once.
+    //
+    // TODO: filesystem/symlink implications?
+    if !source_id.is_registry() {
+        for p in nested.iter() {
+            let path = util::normalize_path(&path.join(p));
+            read_nested_packages(&path, all_packages, source_id, config, visited, errors)?;
+        }
+    }
+
+    Ok(())
+}
diff --git a/src/cargo/ops/cargo_run.rs b/src/cargo/ops/cargo_run.rs
new file mode 100644
index 000000000..a2e35c3d5
--- /dev/null
+++ b/src/cargo/ops/cargo_run.rs
@@ -0,0 +1,105 @@
+use std::path::Path;
+
+use ops::{self, Packages};
+use util::{self, CargoResult, ProcessError};
+use core::{TargetKind, Workspace};
+
+pub fn run(
+    ws: &Workspace,
+    options: &ops::CompileOptions,
+    args: &[String],
+) -> CargoResult<Option<ProcessError>> {
+    let config = ws.config();
+
+    let pkg = match options.spec {
+        Packages::All | Packages::Default | Packages::OptOut(_) => {
+            unreachable!("cargo run supports single package only")
+        }
+        Packages::Packages(ref xs) => match xs.len() {
+            0 => ws.current()?,
+            1 => ws.members()
+                .find(|pkg| *pkg.name() == xs[0])
+                .ok_or_else(|| {
+                    format_err!("package `{}` is not a member of the workspace", xs[0])
+                })?,
+            _ => unreachable!("cargo run supports single package only"),
+        },
+    };
+
+    let bins: Vec<_> = pkg.manifest()
+        .targets()
+        .iter()
+        .filter(|a| {
+            !a.is_lib() && !a.is_custom_build() && if !options.filter.is_specific() {
+                a.is_bin()
+            } else {
+                options.filter.target_run(a)
+            }
+        })
+        .map(|bin| (bin.name(), bin.kind()))
+        .collect();
+
+    if bins.is_empty() {
+        if !options.filter.is_specific() {
+            bail!("a bin target must be available for `cargo run`")
+        } else {
+            // this will be verified in cargo_compile
+        }
+    }
+
+    if bins.len() == 1 {
+        let &(name, kind) = bins.first().unwrap();
+        match kind {
+            &TargetKind::ExampleLib(..) => bail!(
+                "example target `{}` is a library and cannot be executed",
+                name
+            ),
+            _ => {}
+        };
+    }
+
+    if bins.len() > 1 {
+        if !options.filter.is_specific() {
+            let names: Vec<&str> = bins.into_iter().map(|bin| bin.0).collect();
+            bail!(
+                "`cargo run` requires that a project only have one \
+                 executable; use the `--bin` option to specify which one \
+                 to run\navailable binaries: {}",
+                names.join(", ")
+            )
+        } else {
+            bail!(
+                "`cargo run` can run at most one executable, but \
+                 multiple were specified"
+            )
+        }
+    }
+
+    let compile = ops::compile(ws, options)?;
+    assert_eq!(compile.binaries.len(), 1);
+    let exe = &compile.binaries[0];
+    let exe = match util::without_prefix(exe, config.cwd()) {
+        Some(path) if path.file_name() == Some(path.as_os_str()) => {
+            Path::new(".").join(path).to_path_buf()
+        }
+        Some(path) => path.to_path_buf(),
+        None => exe.to_path_buf(),
+    };
+    let mut process = compile.target_process(exe, pkg)?;
+    process.args(args).cwd(config.cwd());
+
+    config.shell().status("Running", process.to_string())?;
+
+    let result = process.exec_replace();
+
+    match result {
+        Ok(()) => Ok(None),
+        Err(e) => {
+            let err = e.downcast::<ProcessError>()?;
+            Ok(Some(err))
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/cargo/ops/cargo_test.rs b/src/cargo/ops/cargo_test.rs
new file mode 100644
index 000000000..c65cc1970
--- /dev/null
+++ b/src/cargo/ops/cargo_test.rs
@@ -0,0 +1,241 @@
+use std::ffi::{OsStr, OsString};
+
+use ops;
+use core::compiler::Compilation;
+use util::{self, CargoTestError, ProcessError, Test};
+use util::errors::CargoResult;
+use core::Workspace;
+
+pub struct TestOptions<'a> {
+    pub compile_opts: ops::CompileOptions<'a>,
+    pub no_run: bool,
+    pub no_fail_fast: bool,
+    pub only_doc: bool,
+}
+
+pub fn run_tests(
+    ws: &Workspace,
+    options: &TestOptions,
+    test_args: &[String],
+) -> CargoResult<Option<CargoTestError>> {
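+    // Order of operations: compile everything first, then run the unit and
+    // integration test binaries, and only fall through to doctests when no
+    // specific test filter was requested and fail-fast hasn't already tripped.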
+    let compilation = compile_tests(ws, options)?;
+
+    if options.no_run {
+        return Ok(None);
+    }
+    let (test, mut errors) = if options.only_doc {
+        assert!(options.compile_opts.filter.is_specific());
+        run_doc_tests(options, test_args, &compilation)?
+    } else {
+        run_unit_tests(options, test_args, &compilation)?
+    };
+
+    // If we have an error and want to fail fast, return.
+    if !errors.is_empty() && !options.no_fail_fast {
+        return Ok(Some(CargoTestError::new(test, errors)));
+    }
+
+    // If a specific test was requested or we're not running any tests at all,
+    // don't run any doc tests.
+    if options.compile_opts.filter.is_specific() {
+        match errors.len() {
+            0 => return Ok(None),
+            _ => return Ok(Some(CargoTestError::new(test, errors))),
+        }
+    }
+
+    let (doctest, docerrors) = run_doc_tests(options, test_args, &compilation)?;
+    let test = if docerrors.is_empty() { test } else { doctest };
+    errors.extend(docerrors);
+    if errors.is_empty() {
+        Ok(None)
+    } else {
+        Ok(Some(CargoTestError::new(test, errors)))
+    }
+}
+
+pub fn run_benches(
+    ws: &Workspace,
+    options: &TestOptions,
+    args: &[String],
+) -> CargoResult<Option<CargoTestError>> {
+    let mut args = args.to_vec();
+    args.push("--bench".to_string());
+    let compilation = compile_tests(ws, options)?;
+
+    if options.no_run {
+        return Ok(None);
+    }
+    let (test, errors) = run_unit_tests(options, &args, &compilation)?;
+    match errors.len() {
+        0 => Ok(None),
+        _ => Ok(Some(CargoTestError::new(test, errors))),
+    }
+}
+
+fn compile_tests<'a>(
+    ws: &Workspace<'a>,
+    options: &TestOptions<'a>,
+) -> CargoResult<Compilation<'a>> {
+    let mut compilation = ops::compile(ws, &options.compile_opts)?;
+    compilation
+        .tests
+        .sort_by(|a, b| (a.0.package_id(), &a.1, &a.2).cmp(&(b.0.package_id(), &b.1, &b.2)));
+    Ok(compilation)
+}
+
+/// Run the unit and integration tests of a project.
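+/// Test executables are run one at a time; unless `--no-fail-fast` was
+/// requested, the loop stops at the first failing test binary.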
+fn run_unit_tests(
+    options: &TestOptions,
+    test_args: &[String],
+    compilation: &Compilation,
+) -> CargoResult<(Test, Vec<ProcessError>)> {
+    let config = options.compile_opts.config;
+    let cwd = options.compile_opts.config.cwd();
+
+    let mut errors = Vec::new();
+
+    for &(ref pkg, ref kind, ref test, ref exe) in &compilation.tests {
+        let to_display = match util::without_prefix(exe, cwd) {
+            Some(path) => path,
+            None => &**exe,
+        };
+        let mut cmd = compilation.target_process(exe, pkg)?;
+        cmd.args(test_args);
+        config
+            .shell()
+            .concise(|shell| shell.status("Running", to_display.display().to_string()))?;
+        config
+            .shell()
+            .verbose(|shell| shell.status("Running", cmd.to_string()))?;
+
+        let result = cmd.exec();
+
+        match result {
+            Err(e) => {
+                let e = e.downcast::<ProcessError>()?;
+                errors.push((kind.clone(), test.clone(), pkg.name().to_string(), e));
+                if !options.no_fail_fast {
+                    break;
+                }
+            }
+            Ok(()) => {}
+        }
+    }
+
+    if errors.len() == 1 {
+        let (kind, name, pkg_name, e) = errors.pop().unwrap();
+        Ok((
+            Test::UnitTest {
+                kind,
+                name,
+                pkg_name,
+            },
+            vec![e],
+        ))
+    } else {
+        Ok((
+            Test::Multiple,
+            errors.into_iter().map(|(_, _, _, e)| e).collect(),
+        ))
+    }
+}
+
+fn run_doc_tests(
+    options: &TestOptions,
+    test_args: &[String],
+    compilation: &Compilation,
+) -> CargoResult<(Test, Vec<ProcessError>)> {
+    let mut errors = Vec::new();
+    let config = options.compile_opts.config;
+
+    // We don't build/run doctests if target != host
+    if compilation.host != compilation.target {
+        return Ok((Test::Doc, errors));
+    }
+
+    let libs = compilation.to_doc_test.iter().map(|package| {
+        (
+            package,
+            package
+                .targets()
+                .iter()
+                .filter(|t| t.doctested())
+                .map(|t| (t.src_path(), t.name(), t.crate_name())),
+        )
+    });
+
+    for (package, tests) in libs {
+        for (lib, name, crate_name) in tests {
+            config.shell().status("Doc-tests", name)?;
+            let mut p = compilation.rustdoc_process(package)?;
+            p.arg("--test")
+                .arg(lib)
+                .arg("--crate-name")
+                .arg(&crate_name);
+
+            for &rust_dep in &[&compilation.deps_output] {
+                let mut arg = OsString::from("dependency=");
+                arg.push(rust_dep);
+                p.arg("-L").arg(arg);
+            }
+
+            for native_dep in compilation.native_dirs.iter() {
+                p.arg("-L").arg(native_dep);
+            }
+
+            for &host_rust_dep in &[&compilation.host_deps_output] {
+                let mut arg = OsString::from("dependency=");
+                arg.push(host_rust_dep);
+                p.arg("-L").arg(arg);
+            }
+
+            for arg in test_args {
+                p.arg("--test-args").arg(arg);
+            }
+
+            if let Some(cfgs) = compilation.cfgs.get(package.package_id()) {
+                for cfg in cfgs.iter() {
+                    p.arg("--cfg").arg(cfg);
+                }
+            }
+
+            let libs = &compilation.libraries[package.package_id()];
+            for &(ref target, ref lib) in libs.iter() {
+                // Note that we can *only* doctest rlib outputs here. A
+                // staticlib output cannot be linked by the compiler (it just
+                // doesn't do that). A dylib output, however, can be linked by
+                // the compiler, but will always fail. Currently all dylibs are
+                // built as "static dylibs" where the standard library is
+                // statically linked into the dylib. The doc tests fail,
+                // however, for now as they try to link the standard library
+                // dynamically as well, causing problems. As a result we only
+                // pass `--extern` for rlib deps and skip out on all other
+                // artifacts.
+                if lib.extension() != Some(OsStr::new("rlib")) && !target.for_host() {
+                    continue;
+                }
+                let mut arg = OsString::from(target.crate_name());
+                arg.push("=");
+                arg.push(lib);
+                p.arg("--extern").arg(&arg);
+            }
+
+            if let Some(flags) = compilation.rustdocflags.get(package.package_id()) {
+                p.args(flags);
+            }
+
+            config
+                .shell()
+                .verbose(|shell| shell.status("Running", p.to_string()))?;
+            if let Err(e) = p.exec() {
+                let e = e.downcast::<ProcessError>()?;
+                errors.push(e);
+                if !options.no_fail_fast {
+                    return Ok((Test::Doc, errors));
+                }
+            }
+        }
+    }
+    Ok((Test::Doc, errors))
+}
diff --git a/src/cargo/ops/lockfile.rs b/src/cargo/ops/lockfile.rs
new file mode 100644
index 000000000..d90e5f408
--- /dev/null
+++ b/src/cargo/ops/lockfile.rs
@@ -0,0 +1,161 @@
+use std::io::prelude::*;
+
+use toml;
+
+use core::{resolver, Resolve, Workspace};
+use core::resolver::WorkspaceResolve;
+use util::Filesystem;
+use util::errors::{CargoResult, CargoResultExt};
+use util::toml as cargo_toml;
+
+pub fn load_pkg_lockfile(ws: &Workspace) -> CargoResult<Option<Resolve>> {
+    if !ws.root().join("Cargo.lock").exists() {
+        return Ok(None);
+    }
+
+    let root = Filesystem::new(ws.root().to_path_buf());
+    let mut f = root.open_ro("Cargo.lock", ws.config(), "Cargo.lock file")?;
+
+    let mut s = String::new();
+    f.read_to_string(&mut s)
+        .chain_err(|| format!("failed to read file: {}", f.path().display()))?;
+
+    let resolve =
+        (|| -> CargoResult<Option<Resolve>> {
+            let resolve: toml::Value = cargo_toml::parse(&s, f.path(), ws.config())?;
+            let v: resolver::EncodableResolve = resolve.try_into()?;
+            Ok(Some(v.into_resolve(ws)?))
+        })()
+            .chain_err(|| format!("failed to parse lock file at: {}", f.path().display()))?;
+    Ok(resolve)
+}
+
+pub fn write_pkg_lockfile(ws: &Workspace, resolve: &Resolve) -> CargoResult<()> {
+    // Load the original lockfile if it exists.
+    let ws_root = Filesystem::new(ws.root().to_path_buf());
+    let orig = ws_root.open_ro("Cargo.lock", ws.config(), "Cargo.lock file");
+    let orig = orig.and_then(|mut f| {
+        let mut s = String::new();
+        f.read_to_string(&mut s)?;
+        Ok(s)
+    });
+
+    let toml = toml::Value::try_from(WorkspaceResolve { ws, resolve }).unwrap();
+
+    let mut out = String::new();
+
+    let deps = toml["package"].as_array().unwrap();
+    for dep in deps.iter() {
+        let dep = dep.as_table().unwrap();
+
+        out.push_str("[[package]]\n");
+        emit_package(dep, &mut out);
+    }
+
+    if let Some(patch) = toml.get("patch") {
+        let list = patch["unused"].as_array().unwrap();
+        for entry in list {
+            out.push_str("[[patch.unused]]\n");
+            emit_package(entry.as_table().unwrap(), &mut out);
+            out.push_str("\n");
+        }
+    }
+
+    if let Some(meta) = toml.get("metadata") {
+        out.push_str("[metadata]\n");
+        out.push_str(&meta.to_string());
+    }
+
+    // If the lockfile contents haven't changed, don't rewrite it. This is
+    // helpful on read-only filesystems.
+    if let Ok(orig) = orig {
+        if are_equal_lockfiles(orig, &out, ws) {
+            return Ok(());
+        }
+    }
+
+    if !ws.config().lock_update_allowed() {
+        if ws.config().cli_unstable().offline {
+            bail!("can't update in the offline mode");
+        }
+
+        let flag = if ws.config().network_allowed() {
+            "--locked"
+        } else {
+            "--frozen"
+        };
+        bail!(
+            "the lock file needs to be updated but {} was passed to \
+             prevent this",
+            flag
+        );
+    }
+
+    // Ok, if that didn't work just write it out
+    ws_root
+        .open_rw("Cargo.lock", ws.config(), "Cargo.lock file")
+        .and_then(|mut f| {
+            f.file().set_len(0)?;
+            f.write_all(out.as_bytes())?;
+            Ok(())
+        })
+        .chain_err(|| format!("failed to write {}", ws.root().join("Cargo.lock").display()))?;
+    Ok(())
+}
+
+fn are_equal_lockfiles(mut orig: String, current: &str, ws: &Workspace) -> bool {
+    if has_crlf_line_endings(&orig) {
+        orig = orig.replace("\r\n", "\n");
+    }
+
+    // If we want to try and avoid updating the lockfile, parse both and
+    // compare them; since this is somewhat expensive, don't do it in the
+    // common case where we can update lockfiles.
+    if !ws.config().lock_update_allowed() {
+        let res: CargoResult<bool> = (|| {
+            let old: resolver::EncodableResolve = toml::from_str(&orig)?;
+            let new: resolver::EncodableResolve = toml::from_str(current)?;
+            Ok(old.into_resolve(ws)? == new.into_resolve(ws)?)
+        })();
+        if let Ok(true) = res {
+            return true;
+        }
+    }
+
+    current == orig
+}
+
+fn has_crlf_line_endings(s: &str) -> bool {
+    // Only check the first line.
+    if let Some(lf) = s.find('\n') {
+        s[..lf].ends_with('\r')
+    } else {
+        false
+    }
+}
+
+fn emit_package(dep: &toml::value::Table, out: &mut String) {
+    out.push_str(&format!("name = {}\n", &dep["name"]));
+    out.push_str(&format!("version = {}\n", &dep["version"]));
+
+    if dep.contains_key("source") {
+        out.push_str(&format!("source = {}\n", &dep["source"]));
+    }
+
+    if let Some(s) = dep.get("dependencies") {
+        let slice = s.as_array().unwrap();
+
+        if !slice.is_empty() {
+            out.push_str("dependencies = [\n");
+
+            for child in slice.iter() {
+                out.push_str(&format!(" {},\n", child));
+            }
+
+            out.push_str("]\n");
+        }
+        out.push_str("\n");
+    } else if dep.contains_key("replace") {
+        out.push_str(&format!("replace = {}\n\n", &dep["replace"]));
+    }
+}
diff --git a/src/cargo/ops/mod.rs b/src/cargo/ops/mod.rs
new file mode 100644
index 000000000..1d0619361
--- /dev/null
+++ b/src/cargo/ops/mod.rs
@@ -0,0 +1,40 @@
+pub use self::cargo_clean::{clean, CleanOptions};
+pub use self::cargo_compile::{compile, compile_with_exec, compile_ws, CompileOptions};
+pub use self::cargo_compile::{CompileFilter, FilterRule, Packages};
+pub use self::cargo_read_manifest::{read_package, read_packages};
+pub use self::cargo_run::run;
+pub use self::cargo_install::{install, install_list, uninstall};
+pub use self::cargo_new::{init, new, NewOptions, VersionControl};
+pub use self::cargo_doc::{doc, DocOptions};
+pub use self::cargo_generate_lockfile::generate_lockfile;
+pub use self::cargo_generate_lockfile::update_lockfile;
+pub use self::cargo_generate_lockfile::UpdateOptions;
+pub use self::lockfile::{load_pkg_lockfile, write_pkg_lockfile};
+pub use self::cargo_test::{run_benches, run_tests, TestOptions};
+pub use self::cargo_package::{package, PackageOpts};
+pub use self::registry::{publish, registry_configuration, RegistryConfig};
+pub use self::registry::{http_handle, needs_custom_http_transport, registry_login, search};
+pub use self::registry::{modify_owners, yank, OwnersOptions, PublishOpts};
+pub use self::registry::configure_http_handle;
+pub use self::cargo_fetch::{fetch, FetchOptions};
+pub use self::cargo_pkgid::pkgid;
+pub use self::resolve::{resolve_with_previous, resolve_ws, resolve_ws_precisely,
+                        resolve_ws_with_method};
+pub use self::cargo_output_metadata::{output_metadata, ExportInfo, OutputMetadataOptions};
+
+mod cargo_clean;
+mod cargo_compile;
+mod cargo_doc;
+mod cargo_fetch;
+mod cargo_generate_lockfile;
+mod cargo_install;
+mod cargo_new;
+mod cargo_output_metadata;
+mod cargo_package;
+mod cargo_pkgid;
+mod cargo_read_manifest;
+mod cargo_run;
+mod cargo_test;
+mod lockfile;
+mod registry;
+mod resolve;
diff --git a/src/cargo/ops/registry.rs b/src/cargo/ops/registry.rs
new file mode 100644
index 000000000..2e1f67142
--- /dev/null
+++ b/src/cargo/ops/registry.rs
@@ -0,0 +1,622 @@
+use std::{cmp, env};
+use std::collections::BTreeMap;
+use std::fs::{self, File};
+use std::iter::repeat;
+use std::time::Duration;
+
+use curl::easy::{Easy, SslOpt};
+use git2;
+use registry::{NewCrate, NewCrateDependency, Registry};
+
+use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
+
+use version;
+use core::source::Source;
+use core::{Package, SourceId, Workspace};
+use core::dependency::Kind;
+use core::manifest::ManifestMetadata;
+use ops;
+use sources::RegistrySource;
+use util::config::{self, Config};
+use util::paths;
+use util::ToUrl;
+use util::errors::{CargoResult, CargoResultExt};
+use util::important_paths::find_root_manifest_for_wd;
+
+pub struct RegistryConfig {
+    pub index: Option<String>,
+    pub token: Option<String>,
+}
+
+pub struct PublishOpts<'cfg> {
+    pub config: &'cfg Config,
+    pub token: Option<String>,
+    pub index: Option<String>,
+    pub verify: bool,
+    pub allow_dirty: bool,
+    pub jobs: Option<u32>,
+    pub target: Option<String>,
+    pub dry_run: bool,
+    pub registry: Option<String>,
+}
+
+pub fn publish(ws: &Workspace, opts: &PublishOpts) -> CargoResult<()> {
+    let pkg = ws.current()?;
+
+    // Allow publishing if a registry has been provided, or if there are no
+    // nightly features enabled.
+    if opts.registry.is_none() && !pkg.manifest().features().activated().is_empty() {
+        bail!("cannot publish crates which activate nightly-only cargo features to crates.io")
+    }
+
+    if let Some(ref allowed_registries) = *pkg.publish() {
+        if !match opts.registry {
+            Some(ref registry) => allowed_registries.contains(registry),
+            None => false,
+        } {
+            bail!(
+                "some crates cannot be published.\n\
+                 `{}` is marked as unpublishable",
+                pkg.name()
+            );
+        }
+    }
+
+    if !pkg.manifest().patch().is_empty() {
+        bail!("published crates cannot contain [patch] sections");
+    }
+
+    let (mut registry, reg_id) = registry(
+        opts.config,
+        opts.token.clone(),
+        opts.index.clone(),
+        opts.registry.clone(),
+    )?;
+    verify_dependencies(pkg, &reg_id)?;
+
+    // Prepare a tarball, with a non-suppressible warning if metadata
+    // is missing since this is being put online.
+    let tarball = ops::package(
+        ws,
+        &ops::PackageOpts {
+            config: opts.config,
+            verify: opts.verify,
+            list: false,
+            check_metadata: true,
+            allow_dirty: opts.allow_dirty,
+            target: opts.target.clone(),
+            jobs: opts.jobs,
+            registry: opts.registry.clone(),
+        },
+    )?.unwrap();
+
+    // Upload said tarball to the specified destination
+    opts.config
+        .shell()
+        .status("Uploading", pkg.package_id().to_string())?;
+    transmit(
+        opts.config,
+        pkg,
+        tarball.file(),
+        &mut registry,
+        &reg_id,
+        opts.dry_run,
+    )?;
+
+    Ok(())
+}
+
+fn verify_dependencies(pkg: &Package, registry_src: &SourceId) -> CargoResult<()> {
+    for dep in pkg.dependencies().iter() {
+        if dep.source_id().is_path() {
+            if !dep.specified_req() {
+                bail!(
+                    "all path dependencies must have a version specified \
+                     when publishing.\ndependency `{}` does not specify \
+                     a version",
+                    dep.name()
+                )
+            }
+        } else if dep.source_id() != registry_src {
+            if dep.source_id().is_registry() {
+                // Block requests to send to a registry if it is not an
+                // alternative registry
+                if !registry_src.is_alt_registry() {
+                    bail!("crates cannot be published to crates.io with dependencies sourced from other\n\
+                           registries either publish `{}` on crates.io or pull it into this repository\n\
+                           and specify it with a path and version\n\
+                           (crate `{}` is pulled from {})", dep.name(), dep.name(), dep.source_id());
+                }
+            } else {
+                bail!(
+                    "crates cannot be published to crates.io with dependencies sourced from \
+                     a repository\neither publish `{}` as its own crate on crates.io and \
+                     specify a crates.io version as a dependency or pull it into this \
+                     repository and specify it with a path and version\n(crate `{}` has \
+                     repository path `{}`)",
+                    dep.name(),
+                    dep.name(),
+                    dep.source_id()
+                );
+            }
+        }
+    }
+    Ok(())
+}
+
+fn transmit(
+    config: &Config,
+    pkg: &Package,
+    tarball: &File,
+    registry: &mut Registry,
+    registry_id: &SourceId,
+    dry_run: bool,
+) -> CargoResult<()> {
+    let deps = pkg.dependencies()
+        .iter()
+        .map(|dep| {
+            // If the dependency is from a different registry, then include the
+            // registry in the dependency.
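+            // (A missing registry ID here is reported as an error rather than
+            // silently defaulting to crates.io.)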
+            let dep_registry_id = match dep.registry_id() {
+                Some(id) => id,
+                None => bail!("dependency missing registry ID"),
+            };
+            let dep_registry = if dep_registry_id != registry_id {
+                Some(dep_registry_id.url().to_string())
+            } else {
+                None
+            };
+
+            Ok(NewCrateDependency {
+                optional: dep.is_optional(),
+                default_features: dep.uses_default_features(),
+                name: dep.name().to_string(),
+                features: dep.features().iter().map(|s| s.to_string()).collect(),
+                version_req: dep.version_req().to_string(),
+                target: dep.platform().map(|s| s.to_string()),
+                kind: match dep.kind() {
+                    Kind::Normal => "normal",
+                    Kind::Build => "build",
+                    Kind::Development => "dev",
+                }.to_string(),
+                registry: dep_registry,
+            })
+        })
+        .collect::<CargoResult<Vec<NewCrateDependency>>>()?;
+    let manifest = pkg.manifest();
+    let ManifestMetadata {
+        ref authors,
+        ref description,
+        ref homepage,
+        ref documentation,
+        ref keywords,
+        ref readme,
+        ref repository,
+        ref license,
+        ref license_file,
+        ref categories,
+        ref badges,
+        ref links,
+    } = *manifest.metadata();
+    let readme_content = match *readme {
+        Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?),
+        None => None,
+    };
+    if let Some(ref file) = *license_file {
+        if fs::metadata(&pkg.root().join(file)).is_err() {
+            bail!("the license file `{}` does not exist", file)
+        }
+    }
+
+    // Do not upload if performing a dry run
+    if dry_run {
+        config.shell().warn("aborting upload due to dry run")?;
+        return Ok(());
+    }
+
+    let summary = pkg.summary();
+    let string_features = summary
+        .features()
+        .iter()
+        .map(|(feat, values)| {
+            (
+                feat.clone(),
+                values.iter().map(|fv| fv.to_string(&summary)).collect(),
+            )
+        })
+        .collect::<BTreeMap<String, Vec<String>>>();
+
+    let publish = registry.publish(
+        &NewCrate {
+            name: pkg.name().to_string(),
+            vers: pkg.version().to_string(),
+            deps,
+            features: string_features,
+            authors: authors.clone(),
+            description: description.clone(),
+            homepage: homepage.clone(),
+            documentation: documentation.clone(),
+            keywords: keywords.clone(),
+            categories: categories.clone(),
+            readme: readme_content,
+            readme_file: readme.clone(),
+            repository: repository.clone(),
+            license: license.clone(),
+            license_file: license_file.clone(),
+            badges: badges.clone(),
+            links: links.clone(),
+        },
+        tarball,
+    );
+
+    match publish {
+        Ok(warnings) => {
+            if !warnings.invalid_categories.is_empty() {
+                let msg = format!(
+                    "the following are not valid category slugs and were \
+                     ignored: {}. Please see https://crates.io/category_slugs \
+                     for the list of all category slugs.",
+                    warnings.invalid_categories.join(", ")
+                );
+                config.shell().warn(&msg)?;
+            }
+
+            if !warnings.invalid_badges.is_empty() {
+                let msg = format!(
+                    "the following are not valid badges and were ignored: {}. \
+                     Either the badge type specified is unknown or a required \
+                     attribute is missing. Please see \
+                     http://doc.crates.io/manifest.html#package-metadata \
+                     for valid badge types and their required attributes.",
+                    warnings.invalid_badges.join(", ")
+                );
+                config.shell().warn(&msg)?;
+            }
+
+            Ok(())
+        }
+        Err(e) => Err(e),
+    }
+}
+
+pub fn registry_configuration(
+    config: &Config,
+    registry: Option<String>,
+) -> CargoResult<RegistryConfig> {
+    let (index, token) = match registry {
+        Some(registry) => (
+            Some(config.get_registry_index(&registry)?.to_string()),
+            config
+                .get_string(&format!("registries.{}.token", registry))?
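+                // i.e. the `token` key under a `[registries.<name>]` table in
+                // the user's cargo configuration.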
+                .map(|p| p.val),
+        ),
+        None => {
+            // Fall back to the default index and token configuration.
+            (
+                config.get_string("registry.index")?.map(|p| p.val),
+                config.get_string("registry.token")?.map(|p| p.val),
+            )
+        }
+    };
+
+    Ok(RegistryConfig { index, token })
+}
+
+pub fn registry(
+    config: &Config,
+    token: Option<String>,
+    index: Option<String>,
+    registry: Option<String>,
+) -> CargoResult<(Registry, SourceId)> {
+    // Parse all configuration options
+    let RegistryConfig {
+        token: token_config,
+        index: index_config,
+    } = registry_configuration(config, registry.clone())?;
+    let token = token.or(token_config);
+    let sid = match (index_config, index, registry) {
+        (_, _, Some(registry)) => SourceId::alt_registry(config, &registry)?,
+        (Some(index), _, _) | (None, Some(index), _) => SourceId::for_registry(&index.to_url()?)?,
+        (None, None, _) => SourceId::crates_io(config)?,
+    };
+    let api_host = {
+        let mut src = RegistrySource::remote(&sid, config);
+        src.update()
+            .chain_err(|| format!("failed to update {}", sid))?;
+        (src.config()?).unwrap().api.unwrap()
+    };
+    let handle = http_handle(config)?;
+    Ok((Registry::new_handle(api_host, token, handle), sid))
+}
+
+/// Create a new HTTP handle with appropriate global configuration for cargo.
+pub fn http_handle(config: &Config) -> CargoResult<Easy> {
+    if config.frozen() {
+        bail!(
+            "attempting to make an HTTP request, but --frozen was \
+             specified"
+        )
+    }
+    if !config.network_allowed() {
+        bail!("can't make HTTP request in the offline mode")
+    }
+
+    // The timeout option for libcurl by default times out the entire transfer,
+    // but we probably don't want this. Instead we only set timeouts for the
+    // connect phase as well as a "low speed" timeout so if we don't receive
+    // many bytes in a large-ish period of time then we time out.
+    let mut handle = Easy::new();
+    configure_http_handle(config, &mut handle)?;
+    Ok(handle)
+}
+
+pub fn needs_custom_http_transport(config: &Config) -> CargoResult<bool> {
+    let proxy_exists = http_proxy_exists(config)?;
+    let timeout = http_timeout(config)?;
+    let cainfo = config.get_path("http.cainfo")?;
+    let check_revoke = config.get_bool("http.check-revoke")?;
+
+    Ok(proxy_exists || timeout.is_some() || cainfo.is_some() || check_revoke.is_some())
+}
+
+/// Configure a libcurl http handle with the default options for Cargo
+pub fn configure_http_handle(config: &Config, handle: &mut Easy) -> CargoResult<()> {
+    // The timeout option for libcurl by default times out the entire transfer,
+    // but we probably don't want this. Instead we only set timeouts for the
+    // connect phase as well as a "low speed" timeout so if we don't receive
+    // many bytes in a large-ish period of time then we time out.
+    handle.connect_timeout(Duration::new(30, 0))?;
+    handle.low_speed_limit(10 /* bytes per second */)?;
+    handle.low_speed_time(Duration::new(30, 0))?;
+    handle.useragent(&version().to_string())?;
+    if let Some(proxy) = http_proxy(config)? {
+        handle.proxy(&proxy)?;
+    }
+    if let Some(cainfo) = config.get_path("http.cainfo")? {
+        handle.cainfo(&cainfo.val)?;
+    }
+    if let Some(check) = config.get_bool("http.check-revoke")? {
+        handle.ssl_options(SslOpt::new().no_revoke(!check.val))?;
+    }
+    if let Some(timeout) = http_timeout(config)? {
+        handle.connect_timeout(Duration::new(timeout as u64, 0))?;
+        handle.low_speed_time(Duration::new(timeout as u64, 0))?;
+    }
+    Ok(())
+}
+
+/// Find an explicit HTTP proxy if one is available.
+///
+/// Favor cargo's `http.proxy`, then git's `http.proxy`. Proxies specified
+/// via environment variables are picked up by libcurl.
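+///
+/// Returns only the proxy URL itself; interpreting the scheme and any
+/// embedded credentials is left to libcurl.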
+fn http_proxy(config: &Config) -> CargoResult<Option<String>> {
+    if let Some(s) = config.get_string("http.proxy")? {
+        return Ok(Some(s.val));
+    }
+    if let Ok(cfg) = git2::Config::open_default() {
+        if let Ok(s) = cfg.get_str("http.proxy") {
+            return Ok(Some(s.to_string()));
+        }
+    }
+    Ok(None)
+}
+
+/// Determine if an http proxy exists.
+///
+/// Checks the following for existence, in order:
+///
+/// * cargo's `http.proxy`
+/// * git's `http.proxy`
+/// * `http_proxy` env var
+/// * `HTTP_PROXY` env var
+/// * `https_proxy` env var
+/// * `HTTPS_PROXY` env var
+fn http_proxy_exists(config: &Config) -> CargoResult<bool> {
+    if http_proxy(config)?.is_some() {
+        Ok(true)
+    } else {
+        Ok(["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"]
+            .iter()
+            .any(|v| env::var(v).is_ok()))
+    }
+}
+
+fn http_timeout(config: &Config) -> CargoResult<Option<i64>> {
+    if let Some(s) = config.get_i64("http.timeout")? {
+        return Ok(Some(s.val));
+    }
+    Ok(env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok()))
+}
+
+pub fn registry_login(config: &Config, token: String, registry: Option<String>) -> CargoResult<()> {
+    let RegistryConfig {
+        token: old_token, ..
+    } = registry_configuration(config, registry.clone())?;
+
+    if let Some(old_token) = old_token {
+        if old_token == token {
+            return Ok(());
+        }
+    }
+
+    config::save_credentials(config, token, registry)
+}
+
+pub struct OwnersOptions {
+    pub krate: Option<String>,
+    pub token: Option<String>,
+    pub index: Option<String>,
+    pub to_add: Option<Vec<String>>,
+    pub to_remove: Option<Vec<String>>,
+    pub list: bool,
+    pub registry: Option<String>,
+}
+
+pub fn modify_owners(config: &Config, opts: &OwnersOptions) -> CargoResult<()> {
+    let name = match opts.krate {
+        Some(ref name) => name.clone(),
+        None => {
+            let manifest_path = find_root_manifest_for_wd(config.cwd())?;
+            let ws = Workspace::new(&manifest_path, config)?;
+            ws.current()?.package_id().name().to_string()
+        }
+    };
+
+    let (mut registry, _) = registry(
+        config,
+        opts.token.clone(),
+        opts.index.clone(),
+        opts.registry.clone(),
+    )?;
+
+    if let Some(ref v) = opts.to_add {
+        let v = v.iter().map(|s| &s[..]).collect::<Vec<_>>();
+        let msg = registry
+            .add_owners(&name, &v)
+            .map_err(|e| format_err!("failed to invite owners to crate {}: {}", name, e))?;
+
+        config.shell().status("Owner", msg)?;
+    }
+
+    if let Some(ref v) = opts.to_remove {
+        let v = v.iter().map(|s| &s[..]).collect::<Vec<_>>();
+        config
+            .shell()
+            .status("Owner", format!("removing {:?} from crate {}", v, name))?;
+        registry
+            .remove_owners(&name, &v)
+            .chain_err(|| format!("failed to remove owners from crate {}", name))?;
+    }
+
+    if opts.list {
+        let owners = registry
+            .list_owners(&name)
+            .chain_err(|| format!("failed to list owners of crate {}", name))?;
+        for owner in owners.iter() {
+            print!("{}", owner.login);
+            match (owner.name.as_ref(), owner.email.as_ref()) {
+                (Some(name), Some(email)) => println!(" ({} <{}>)", name, email),
+                (Some(s), None) | (None, Some(s)) => println!(" ({})", s),
+                (None, None) => println!(),
+            }
+        }
+    }
+
+    Ok(())
+}
+
+pub fn yank(
+    config: &Config,
+    krate: Option<String>,
+    version: Option<String>,
+    token: Option<String>,
+    index: Option<String>,
+    undo: bool,
+    reg: Option<String>,
+) -> CargoResult<()> {
+    let name = match krate {
+        Some(name) => name,
+        None => {
+            let manifest_path = find_root_manifest_for_wd(config.cwd())?;
+            let ws = Workspace::new(&manifest_path, config)?;
+            ws.current()?.package_id().name().to_string()
+        }
+    };
+    let version = match version {
+        Some(v) => v,
+        None => bail!("a version must be specified to yank"),
+    };
+
+    let (mut registry, _) = registry(config, token, index, reg)?;
+
+    if undo {
+        config
+            .shell()
+            .status("Unyank", format!("{}:{}", name, version))?;
+        registry
+            .unyank(&name, &version)
+            .chain_err(|| "failed to undo a yank")?;
+    } else {
+        config
+            .shell()
+            .status("Yank", format!("{}:{}", name, version))?;
+        registry
+            .yank(&name, &version)
+            .chain_err(|| "failed to yank")?;
+    }
+
+    Ok(())
+}
+
+pub fn search(
+    query: &str,
+    config: &Config,
+    index: Option<String>,
+    limit: u32,
+    reg: Option<String>,
+) -> CargoResult<()> {
+    fn truncate_with_ellipsis(s: &str, max_width: usize) -> String {
+        // We should truncate at grapheme-boundary and compute character-widths,
+        // yet the dependencies on unicode-segmentation and unicode-width are
+        // not worth it.
+        let mut chars = s.chars();
+        let mut prefix = (&mut chars).take(max_width - 1).collect::<String>();
+        if chars.next().is_some() {
+            prefix.push('…');
+        }
+        prefix
+    }
+
+    let (mut registry, _) = registry(config, None, index, reg)?;
+    let (crates, total_crates) = registry
+        .search(query, limit)
+        .chain_err(|| "failed to retrieve search results from the registry")?;
+
+    let names = crates
+        .iter()
+        .map(|krate| format!("{} = \"{}\"", krate.name, krate.max_version))
+        .collect::<Vec<String>>();
+
+    let description_margin = names.iter().map(|s| s.len() + 4).max().unwrap_or_default();
+
+    let description_length = cmp::max(80, 128 - description_margin);
+
+    let descriptions = crates.iter().map(|krate| {
+        krate
+            .description
+            .as_ref()
+            .map(|desc| truncate_with_ellipsis(&desc.replace("\n", " "), description_length))
+    });
+
+    for (name, description) in names.into_iter().zip(descriptions) {
+        let line = match description {
+            Some(desc) => {
+                let space = repeat(' ')
+                    .take(description_margin - name.len())
+                    .collect::<String>();
+                name + &space + "# " + &desc
+            }
+            None => name,
+        };
+        println!("{}", line);
+    }
+
+    let search_max_limit = 100;
+    if total_crates > limit && limit < search_max_limit {
+        println!(
+            "... and {} crates more (use --limit N to see more)",
+            total_crates - limit
+        );
+    } else if total_crates > limit && limit >= search_max_limit {
+        println!(
+            "... and {} crates more (go to http://crates.io/search?q={} to see more)",
+            total_crates - limit,
+            percent_encode(query.as_bytes(), QUERY_ENCODE_SET)
+        );
+    }
+
+    Ok(())
+}
diff --git a/src/cargo/ops/resolve.rs b/src/cargo/ops/resolve.rs
new file mode 100644
index 000000000..5a700a7d2
--- /dev/null
+++ b/src/cargo/ops/resolve.rs
@@ -0,0 +1,559 @@
+use std::collections::HashSet;
+
+use core::{PackageId, PackageIdSpec, PackageSet, Source, SourceId, Workspace};
+use core::registry::PackageRegistry;
+use core::resolver::{self, Method, Resolve};
+use sources::PathSource;
+use ops;
+use util::profile;
+use util::errors::{CargoResult, CargoResultExt};
+
+/// Resolve all dependencies for the workspace using the previous
+/// lockfile as a guide if present.
+///
+/// This function will also write the result of resolution as a new
+/// lockfile.
+pub fn resolve_ws<'a>(ws: &Workspace<'a>) -> CargoResult<(PackageSet<'a>, Resolve)> {
+    let mut registry = PackageRegistry::new(ws.config())?;
+    let resolve = resolve_with_registry(ws, &mut registry, true)?;
+    let packages = get_resolved_packages(&resolve, registry);
+    Ok((packages, resolve))
+}
+
+/// Resolves dependencies for some packages of the workspace,
+/// taking into account `paths` overrides and activated features.
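+///
+/// This is a thin wrapper that translates the feature flags into a resolver
+/// `Method` before delegating to `resolve_ws_with_method`.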
+pub fn resolve_ws_precisely<'a>(
+    ws: &Workspace<'a>,
+    source: Option<Box<Source + 'a>>,
+    features: &[String],
+    all_features: bool,
+    no_default_features: bool,
+    specs: &[PackageIdSpec],
+) -> CargoResult<(PackageSet<'a>, Resolve)> {
+    let features = Method::split_features(features);
+    let method = if all_features {
+        Method::Everything
+    } else {
+        Method::Required {
+            dev_deps: true,
+            features: &features,
+            all_features: false,
+            uses_default_features: !no_default_features,
+        }
+    };
+    resolve_ws_with_method(ws, source, method, specs)
+}
+
+pub fn resolve_ws_with_method<'a>(
+    ws: &Workspace<'a>,
+    source: Option<Box<Source + 'a>>,
+    method: Method,
+    specs: &[PackageIdSpec],
+) -> CargoResult<(PackageSet<'a>, Resolve)> {
+    let mut registry = PackageRegistry::new(ws.config())?;
+    if let Some(source) = source {
+        registry.add_preloaded(source);
+    }
+    let mut add_patches = true;
+
+    let resolve = if ws.require_optional_deps() {
+        // First, resolve the root_package's *listed* dependencies, as well as
+        // downloading and updating all remotes and such.
+        let resolve = resolve_with_registry(ws, &mut registry, false)?;
+        add_patches = false;
+
+        // Second, resolve with precisely what we're doing. Filter out
+        // transitive dependencies if necessary, specify features, handle
+        // overrides, etc.
+        let _p = profile::start("resolving w/ overrides...");
+
+        add_overrides(&mut registry, ws)?;
+
+        for &(ref replace_spec, ref dep) in ws.root_replace() {
+            if !resolve
+                .iter()
+                .any(|r| replace_spec.matches(r) && !dep.matches_id(r))
+            {
+                ws.config()
+                    .shell()
+                    .warn(format!("package replacement is not used: {}", replace_spec))?
+            }
+        }
+
+        Some(resolve)
+    } else {
+        ops::load_pkg_lockfile(ws)?
+    };
+
+    let resolved_with_overrides = ops::resolve_with_previous(
+        &mut registry,
+        ws,
+        method,
+        resolve.as_ref(),
+        None,
+        specs,
+        add_patches,
+        true,
+    )?;
+
+    let packages = get_resolved_packages(&resolved_with_overrides, registry);
+
+    Ok((packages, resolved_with_overrides))
+}
+
+fn resolve_with_registry<'cfg>(
+    ws: &Workspace<'cfg>,
+    registry: &mut PackageRegistry<'cfg>,
+    warn: bool,
+) -> CargoResult<Resolve> {
+    let prev = ops::load_pkg_lockfile(ws)?;
+    let resolve = resolve_with_previous(
+        registry,
+        ws,
+        Method::Everything,
+        prev.as_ref(),
+        None,
+        &[],
+        true,
+        warn,
+    )?;
+
+    if !ws.is_ephemeral() {
+        ops::write_pkg_lockfile(ws, &resolve)?;
+    }
+    Ok(resolve)
+}
+
+/// Resolve all dependencies for a package using an optional previous instance
+/// of resolve to guide the resolution process.
+///
+/// This also takes an optional hash set, `to_avoid`, which is a list of package
+/// ids that should be avoided when consulting the previous instance of resolve
+/// (often used in pairings with updates).
+///
+/// The previous resolve normally comes from a lockfile. This function does not
+/// read or write lockfiles from the filesystem.
+pub fn resolve_with_previous<'a, 'cfg>(
+    registry: &mut PackageRegistry<'cfg>,
+    ws: &Workspace<'cfg>,
+    method: Method,
+    previous: Option<&'a Resolve>,
+    to_avoid: Option<&HashSet<&'a PackageId>>,
+    specs: &[PackageIdSpec],
+    register_patches: bool,
+    warn: bool,
+) -> CargoResult<Resolve> {
+    // Here we place an artificial limitation that all non-registry sources
+    // cannot be locked at more than one revision. This means that if a git
+    // repository provides more than one package, they must all be updated in
+    // step when any of them are updated.
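+    // (Concretely: avoiding one package from a git source below also avoids
+    // every other package that shares that source id.)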
+    //
+    // TODO: This seems like a hokey reason to single out the registry as being
+    // different
+    let mut to_avoid_sources = HashSet::new();
+    if let Some(to_avoid) = to_avoid {
+        to_avoid_sources.extend(
+            to_avoid
+                .iter()
+                .map(|p| p.source_id())
+                .filter(|s| !s.is_registry()),
+        );
+    }
+
+    let ref keep = |p: &&'a PackageId| {
+        !to_avoid_sources.contains(&p.source_id()) && match to_avoid {
+            Some(set) => !set.contains(p),
+            None => true,
+        }
+    };
+
+    // In the case where a previous instance of resolve is available, we
+    // want to lock as many packages as possible to the previous version
+    // without disturbing the graph structure.
+    let mut try_to_use = HashSet::new();
+    if let Some(r) = previous {
+        trace!("previous: {:?}", r);
+        register_previous_locks(ws, registry, r, keep);
+
+        // Everything in the previous lock file we want to keep is prioritized
+        // in dependency selection if it comes up, aka we want to have
+        // conservative updates.
+        try_to_use.extend(r.iter().filter(keep).inspect(|id| {
+            debug!("attempting to prefer {}", id);
+        }));
+    }
+
+    if register_patches {
+        for (url, patches) in ws.root_patch() {
+            let previous = match previous {
+                Some(r) => r,
+                None => {
+                    registry.patch(url, patches)?;
+                    continue;
+                }
+            };
+            let patches = patches
+                .iter()
+                .map(|dep| {
+                    let unused = previous.unused_patches();
+                    let candidates = previous.iter().chain(unused);
+                    match candidates.filter(keep).find(|id| dep.matches_id(id)) {
+                        Some(id) => {
+                            let mut dep = dep.clone();
+                            dep.lock_to(id);
+                            dep
+                        }
+                        None => dep.clone(),
+                    }
+                })
+                .collect::<Vec<_>>();
+            registry.patch(url, &patches)?;
+        }
+
+        registry.lock_patches();
+    }
+
+    for member in ws.members() {
+        registry.add_sources(&[member.package_id().source_id().clone()])?;
+    }
+
+    let mut summaries = Vec::new();
+    if ws.config().cli_unstable().package_features {
+        let mut members = Vec::new();
+        match method {
+            Method::Everything => members.extend(ws.members()),
+            Method::Required {
+                features,
+                all_features,
+                uses_default_features,
+                ..
+            } => {
+                if specs.len() > 1 && !features.is_empty() {
+                    bail!("cannot specify features for more than one package");
+                }
+                members.extend(
+                    ws.members()
+                        .filter(|m| specs.iter().any(|spec| spec.matches(m.package_id()))),
+                );
+                // Edge case: running `cargo build -p foo`, where `foo` is not a member
+                // of current workspace. Add all packages from workspace to get `foo`
+                // into the resolution graph.
+                if members.is_empty() {
+                    if !(features.is_empty() && !all_features && uses_default_features) {
+                        bail!("cannot specify features for packages outside of workspace");
+                    }
+                    members.extend(ws.members());
+                }
+            }
+        }
+        for member in members {
+            let summary = registry.lock(member.summary().clone());
+            summaries.push((summary, method))
+        }
+    } else {
+        for member in ws.members() {
+            let method_to_resolve = match method {
+                // When resolving everything for a workspace we want to be sure
+                // to resolve all members in the workspace, so propagate the
+                // `Method::Everything`.
+                Method::Everything => Method::Everything,
+
+                // If we're not resolving everything though then we're constructing the
+                // exact crate graph we're going to build. Here we don't necessarily
+                // want to keep around all workspace crates as they may not all be
+                // built/tested.
+                //
+                // Additionally, the `method` specified represents command line
+                // flags, which really only matter for the current package
+                // (determined by the cwd).
+                // If other packages are specified (via `-p`) then the command
+                // line flags like features don't apply to them.
+                //
+                // As a result, if this `member` is the current member of the
+                // workspace, then we use `method` specified. Otherwise we use a
+                // base method with no features specified but using default features
+                // for any other packages specified with `-p`.
+                Method::Required { dev_deps, all_features, .. } => {
+                    let base = Method::Required {
+                        dev_deps,
+                        features: &[],
+                        all_features,
+                        uses_default_features: true,
+                    };
+                    let member_id = member.package_id();
+                    match ws.current_opt() {
+                        Some(current) if member_id == current.package_id() => method,
+                        _ => {
+                            if specs.iter().any(|spec| spec.matches(member_id)) {
+                                base
+                            } else {
+                                continue;
+                            }
+                        }
+                    }
+                }
+            };
+
+            let summary = registry.lock(member.summary().clone());
+            summaries.push((summary, method_to_resolve));
+        }
+    };
+
+    let root_replace = ws.root_replace();
+
+    let replace = match previous {
+        Some(r) => root_replace
+            .iter()
+            .map(|&(ref spec, ref dep)| {
+                for (key, val) in r.replacements().iter() {
+                    if spec.matches(key) && dep.matches_id(val) && keep(&val) {
+                        let mut dep = dep.clone();
+                        dep.lock_to(val);
+                        return (spec.clone(), dep);
+                    }
+                }
+                (spec.clone(), dep.clone())
+            })
+            .collect::<Vec<_>>(),
+        None => root_replace.to_vec(),
+    };
+
+    ws.preload(registry);
+    let mut resolved = resolver::resolve(
+        &summaries,
+        &replace,
+        registry,
+        &try_to_use,
+        Some(ws.config()),
+        warn,
+    )?;
+    resolved.register_used_patches(registry.patches());
+    if let Some(previous) = previous {
+        resolved.merge_from(previous)?;
+    }
+    Ok(resolved)
+}
+
+/// Read the `paths` configuration variable to discover all path overrides that
+/// have been configured.
+fn add_overrides<'a>(registry: &mut PackageRegistry<'a>, ws: &Workspace<'a>) -> CargoResult<()> {
+    let paths = match ws.config().get_list("paths")? {
+        Some(list) => list,
+        None => return Ok(()),
+    };
+
+    let paths = paths.val.iter().map(|&(ref s, ref p)| {
+        // The path listed next to the string is the config file in which the
+        // key was located, so we want to pop off the `.cargo/config` component
+        // to get the directory containing the `.cargo` folder.
+        (p.parent().unwrap().parent().unwrap().join(s), p)
+    });
+
+    for (path, definition) in paths {
+        let id = SourceId::for_path(&path)?;
+        let mut source = PathSource::new_recursive(&path, &id, ws.config());
+        source.update().chain_err(|| {
+            format!(
+                "failed to update path override `{}` \
+                 (defined in `{}`)",
+                path.display(),
+                definition.display()
+            )
+        })?;
+        registry.add_override(Box::new(source));
+    }
+    Ok(())
+}
+
+fn get_resolved_packages<'a>(resolve: &Resolve, registry: PackageRegistry<'a>) -> PackageSet<'a> {
+    let ids: Vec<PackageId> = resolve.iter().cloned().collect();
+    registry.get(&ids)
+}
+
+/// In this function we're responsible for informing the `registry` of all
+/// locked dependencies from the previous lock file we had, `resolve`.
+///
+/// This gets particularly tricky for a couple of reasons. The first is that we
+/// want all updates to be conservative, so we actually want to take the
+/// `resolve` into account (and avoid unnecessary registry updates and such).
+/// The second, however, is that we want to be resilient to updates of
+/// manifests. For example if a dependency is added or a version is changed we
+/// want to make sure that we properly re-resolve (conservatively) instead of
+/// providing an opaque error.
+///
+/// The logic here is somewhat subtle but there should be more comments below to
+/// help out, and otherwise feel free to ask on IRC if there's questions!
+///
+/// Note that this function, at the time of this writing, is basically the
+/// entire fix for #4127
+fn register_previous_locks<'a>(
+    ws: &Workspace,
+    registry: &mut PackageRegistry,
+    resolve: &'a Resolve,
+    keep: &Fn(&&'a PackageId) -> bool,
+) {
+    let path_pkg = |id: &SourceId| {
+        if !id.is_path() {
+            return None;
+        }
+        if let Ok(path) = id.url().to_file_path() {
+            if let Ok(pkg) = ws.load(&path.join("Cargo.toml")) {
+                return Some(pkg);
+            }
+        }
+        None
+    };
+
+    // Ok so we've been passed in a `keep` function which basically says "if I
+    // return true then this package wasn't listed for an update on the command
+    // line". AKA if we run `cargo update -p foo` then `keep(bar)` will return
+    // `true`, whereas `keep(foo)` will return `false` (roughly).
+    //
+    // This isn't actually quite what we want, however. Instead we want to
+    // further refine this `keep` function with *all transitive dependencies* of
+    // the packages we're not keeping. For example consider a case like this:
+    //
+    // * There's a crate `log`
+    // * There's a crate `serde` which depends on `log`
+    //
+    // Let's say we then run `cargo update -p serde`. This may *also* want to
+    // update the `log` dependency as our newer version of `serde` may have a
+    // new minimum version required for `log`. Now this isn't always guaranteed
+    // to work. What'll happen here is we *won't* lock the `log` dependency nor
+    // the `log` crate itself, but we will inform the registry "please prefer
+    // this version of `log`". That way if our newer version of serde works with
+    // the older version of `log`, we conservatively won't update `log`. If,
+    // however, nothing else in the dependency graph depends on `log` and the
+    // newer version of `serde` requires a new version of `log` it'll get pulled
+    // in (as we didn't accidentally lock it to an old version).
+    //
+    // Additionally here we process all path dependencies listed in the previous
+    // resolve. Not only can their dependencies change, but the versions of the
+    // packages themselves can change as well. If this ends up happening
+    // then we want to make sure we don't lock a package id node that doesn't
+    // actually exist. Note that we don't do transitive visits of all the
+    // package's dependencies here as that'll be covered below to poison those
+    // if they changed.
+    let mut avoid_locking = HashSet::new();
+    for node in resolve.iter() {
+        if !keep(&node) {
+            add_deps(resolve, node, &mut avoid_locking);
+        } else if let Some(pkg) = path_pkg(node.source_id()) {
+            if pkg.package_id() != node {
+                avoid_locking.insert(node);
+            }
+        }
+    }
+
+    // Ok but the above loop isn't the entire story! Updates to the dependency
+    // graph can come from two locations, the `cargo update` command or
+    // manifests themselves. For example a manifest on the filesystem may
+    // have been updated to have an updated version requirement on `serde`. In
+    // this case both `keep(serde)` and `keep(log)` return `true` (the `keep`
+    // that's an argument to this function). We, however, don't want to keep
+    // either of those! Otherwise we'll get obscure resolve errors about locked
+    // versions.
+    //
+    // To solve this problem we iterate over all packages with path sources
+    // (aka ones with manifests that are changing) and take a look at all of
+    // their dependencies.
+    // their dependencies. If any dependency does not match something in the
+    // previous lock file, then we're guaranteed that the main resolver will
+    // update the source of this dependency no matter what. Knowing this we
+    // poison all packages from the same source, forcing them all to get
+    // updated.
+    //
+    // This may seem like a heavy hammer, and it is! It means that if you change
+    // anything from crates.io then all of crates.io becomes unlocked. Note,
+    // however, that we still want conservative updates. This currently happens
+    // because the first candidate the resolver picks is the previously locked
+    // version, and only if that fails to activate do we move on and try
+    // a different version (giving the guise of conservative updates).
+    //
+    // For example let's say we had `serde = "0.1"` written in our lock file.
+    // When we later edit this to `serde = "0.1.3"` we don't want to lock serde
+    // at its old version, 0.1.1. Instead we want to allow it to update to
+    // `0.1.3` and update its own dependencies (like above). To do this *all
+    // crates from crates.io* are not locked (aka added to `avoid_locking`).
+    // For dependencies like `log` their previous version in the lock file will
+    // come up first before newer versions, if newer versions are available.
+    let mut path_deps = ws.members().cloned().collect::<Vec<_>>();
+    let mut visited = HashSet::new();
+    while let Some(member) = path_deps.pop() {
+        if !visited.insert(member.package_id().clone()) {
+            continue;
+        }
+        for dep in member.dependencies() {
+            // If this dependency didn't match anything special then we may want
+            // to poison the source as it may have been added. If this path
+            // dependency is *not* a workspace member, however, and it's an
+            // optional/non-transitive dependency, then it won't necessarily be
+            // in our lock file. If this shows up then we avoid poisoning
+            // this source as otherwise we'd repeatedly update the registry.
+            //
+            // TODO: this breaks adding an optional dependency in a
+            //       non-workspace member and then simultaneously editing the
+            //       dependency on that crate to enable the feature. For now
+            //       this bug is better than the always-updating registry
+            //       though...
+            if !ws.members()
+                .any(|pkg| pkg.package_id() == member.package_id())
+                && (dep.is_optional() || !dep.is_transitive())
+            {
+                continue;
+            }
+
+            // If this is a path dependency then try to push it onto our
+            // worklist.
+            if let Some(pkg) = path_pkg(dep.source_id()) {
+                path_deps.push(pkg);
+                continue;
+            }
+
+            // If we match *anything* in the dependency graph then we consider
+            // ourselves A-OK and assume that we'll resolve to that.
+            if resolve.iter().any(|id| dep.matches_ignoring_source(id)) {
+                continue;
+            }
+
+            // Ok, if nothing matches then we poison the source of this
+            // dependency, unlocking everything from that source that was in
+            // the previous lock file.
+            debug!(
+                "poisoning {} because {} looks like it changed {}",
+                dep.source_id(),
+                member.package_id(),
+                dep.name()
+            );
+            for id in resolve
+                .iter()
+                .filter(|id| id.source_id() == dep.source_id())
+            {
+                add_deps(resolve, id, &mut avoid_locking);
+            }
+        }
+    }
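The transitive sweep that makes this work is `add_deps`, defined at the end of this function. As a minimal, self-contained sketch of the same idea, with a toy dependency function standing in for `Resolve::deps_not_replaced`:

```rust
use std::collections::HashSet;

// Collect `roots` plus everything reachable from them, i.e. the set of
// packages that must NOT be locked to their previous versions.
fn transitive_avoid_set<'a, F>(roots: &[&'a str], deps: &F) -> HashSet<&'a str>
where
    F: Fn(&'a str) -> Vec<&'a str>,
{
    let mut avoid = HashSet::new();
    let mut stack: Vec<&str> = roots.to_vec();
    while let Some(node) = stack.pop() {
        if avoid.insert(node) {
            stack.extend(deps(node)); // visit each node only once
        }
    }
    avoid
}

fn main() {
    // `cargo update -p serde` was requested, and `serde` depends on `log`.
    let deps = |pkg: &'static str| match pkg {
        "serde" => vec!["log"],
        _ => vec![],
    };
    let avoid = transitive_avoid_set(&["serde"], &deps);
    assert!(avoid.contains("log")); // `log` is left unlocked as well
}
```

Running `cargo update -p serde` thus leaves `log` free to move, while the registry still prefers previously locked versions first.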
+    // Alright now that we've got our new, fresh, shiny, and refined `keep`
+    // function let's put it to action. Take a look at the previous lockfile,
+    // filter everything by this callback, and then shove everything else into
+    // the registry as a locked dependency.
+    let ref keep = |id: &&'a PackageId| keep(id) && !avoid_locking.contains(id);
+
+    for node in resolve.iter().filter(keep) {
+        let deps = resolve
+            .deps_not_replaced(node)
+            .filter(keep)
+            .cloned()
+            .collect();
+        registry.register_lock(node.clone(), deps);
+    }
+
+    /// Recursively add `node` and all its transitive dependencies to `set`.
+    fn add_deps<'a>(resolve: &'a Resolve, node: &'a PackageId, set: &mut HashSet<&'a PackageId>) {
+        if !set.insert(node) {
+            return;
+        }
+        debug!("ignoring any lock pointing directly at {}", node);
+        for dep in resolve.deps_not_replaced(node) {
+            add_deps(resolve, dep, set);
+        }
+    }
+}
diff --git a/src/cargo/sources/config.rs b/src/cargo/sources/config.rs
new file mode 100644
index 000000000..991c3c631
--- /dev/null
+++ b/src/cargo/sources/config.rs
@@ -0,0 +1,244 @@
+//! Implementation of configuration for various sources
+//!
+//! This module will parse the various `source.*` TOML configuration keys into a
+//! structure usable by Cargo itself. Currently this is primarily used to map
+//! sources to one another via the `replace-with` key in `.cargo/config`.
+
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+
+use url::Url;
+
+use core::{GitReference, Source, SourceId};
+use sources::ReplacedSource;
+use util::{Config, ToUrl};
+use util::config::ConfigValue;
+use util::errors::{CargoResult, CargoResultExt};
+
+#[derive(Clone)]
+pub struct SourceConfigMap<'cfg> {
+    cfgs: HashMap<String, SourceConfig>,
+    id2name: HashMap<SourceId, String>,
+    config: &'cfg Config,
+}
+
+/// Configuration for a particular source, found in TOML looking like:
+///
+/// ```toml
+/// [source.crates-io]
+/// registry = 'https://github.com/rust-lang/crates.io-index'
+/// replace-with = 'foo'    # optional
+/// ```
+#[derive(Clone)]
+struct SourceConfig {
+    // id this source corresponds to, inferred from the various defined keys in
+    // the configuration
+    id: SourceId,
+
+    // Name of the source that this source should be replaced with. This field
+    // is a tuple of (name, path) where path is where this configuration key was
+    // defined (the literal `.cargo/config` file).
+    replace_with: Option<(String, PathBuf)>,
+}
+
+impl<'cfg> SourceConfigMap<'cfg> {
+    pub fn new(config: &'cfg Config) -> CargoResult<SourceConfigMap<'cfg>> {
+        let mut base = SourceConfigMap::empty(config)?;
+        if let Some(table) = config.get_table("source")?
{ + for (key, value) in table.val.iter() { + base.add_config(key, value)?; + } + } + Ok(base) + } + + pub fn empty(config: &'cfg Config) -> CargoResult> { + let mut base = SourceConfigMap { + cfgs: HashMap::new(), + id2name: HashMap::new(), + config, + }; + base.add( + "crates-io", + SourceConfig { + id: SourceId::crates_io(config)?, + replace_with: None, + }, + ); + Ok(base) + } + + pub fn config(&self) -> &'cfg Config { + self.config + } + + pub fn load(&self, id: &SourceId) -> CargoResult> { + debug!("loading: {}", id); + let mut name = match self.id2name.get(id) { + Some(name) => name, + None => return Ok(id.load(self.config)?), + }; + let mut path = Path::new("/"); + let orig_name = name; + let new_id; + loop { + let cfg = match self.cfgs.get(name) { + Some(cfg) => cfg, + None => bail!( + "could not find a configured source with the \ + name `{}` when attempting to lookup `{}` \ + (configuration in `{}`)", + name, + orig_name, + path.display() + ), + }; + match cfg.replace_with { + Some((ref s, ref p)) => { + name = s; + path = p; + } + None if *id == cfg.id => return Ok(id.load(self.config)?), + None => { + new_id = cfg.id.with_precise(id.precise().map(|s| s.to_string())); + break; + } + } + debug!("following pointer to {}", name); + if name == orig_name { + bail!( + "detected a cycle of `replace-with` sources, the source \ + `{}` is eventually replaced with itself \ + (configuration in `{}`)", + name, + path.display() + ) + } + } + let new_src = new_id.load(self.config)?; + let old_src = id.load(self.config)?; + if !new_src.supports_checksums() && old_src.supports_checksums() { + bail!( + "\ +cannot replace `{orig}` with `{name}`, the source `{orig}` supports \ +checksums, but `{name}` does not + +a lock file compatible with `{orig}` cannot be generated in this situation +", + orig = orig_name, + name = name + ); + } + + if old_src.requires_precise() && id.precise().is_none() { + bail!( + "\ +the source {orig} requires a lock file to be present first before it can be +used against vendored source code + +remove the source replacement configuration, generate a lock file, and then +restore the source replacement configuration to continue the build +", + orig = orig_name + ); + } + + Ok(Box::new(ReplacedSource::new(id, &new_id, new_src))) + } + + fn add(&mut self, name: &str, cfg: SourceConfig) { + self.id2name.insert(cfg.id.clone(), name.to_string()); + self.cfgs.insert(name.to_string(), cfg); + } + + fn add_config(&mut self, name: &str, cfg: &ConfigValue) -> CargoResult<()> { + let (table, _path) = cfg.table(&format!("source.{}", name))?; + let mut srcs = Vec::new(); + if let Some(val) = table.get("registry") { + let url = url(val, &format!("source.{}.registry", name))?; + srcs.push(SourceId::for_registry(&url)?); + } + if let Some(val) = table.get("local-registry") { + let (s, path) = val.string(&format!("source.{}.local-registry", name))?; + let mut path = path.to_path_buf(); + path.pop(); + path.pop(); + path.push(s); + srcs.push(SourceId::for_local_registry(&path)?); + } + if let Some(val) = table.get("directory") { + let (s, path) = val.string(&format!("source.{}.directory", name))?; + let mut path = path.to_path_buf(); + path.pop(); + path.pop(); + path.push(s); + srcs.push(SourceId::for_directory(&path)?); + } + if let Some(val) = table.get("git") { + let url = url(val, &format!("source.{}.git", name))?; + let try = |s: &str| { + let val = match table.get(s) { + Some(s) => s, + None => return Ok(None), + }; + let key = format!("source.{}.{}", name, s); + 
val.string(&key).map(Some) + }; + let reference = match try("branch")? { + Some(b) => GitReference::Branch(b.0.to_string()), + None => match try("tag")? { + Some(b) => GitReference::Tag(b.0.to_string()), + None => match try("rev")? { + Some(b) => GitReference::Rev(b.0.to_string()), + None => GitReference::Branch("master".to_string()), + }, + }, + }; + srcs.push(SourceId::for_git(&url, reference)?); + } + if name == "crates-io" && srcs.is_empty() { + srcs.push(SourceId::crates_io(self.config)?); + } + + let mut srcs = srcs.into_iter(); + let src = srcs.next().ok_or_else(|| { + format_err!( + "no source URL specified for `source.{}`, need \ + either `registry` or `local-registry` defined", + name + ) + })?; + if srcs.next().is_some() { + bail!("more than one source URL specified for `source.{}`", name) + } + + let mut replace_with = None; + if let Some(val) = table.get("replace-with") { + let (s, path) = val.string(&format!("source.{}.replace-with", name))?; + replace_with = Some((s.to_string(), path.to_path_buf())); + } + + self.add( + name, + SourceConfig { + id: src, + replace_with, + }, + ); + + return Ok(()); + + fn url(cfg: &ConfigValue, key: &str) -> CargoResult { + let (url, path) = cfg.string(key)?; + let url = url.to_url().chain_err(|| { + format!( + "configuration key `{}` specified an invalid \ + URL (in {})", + key, + path.display() + ) + })?; + Ok(url) + } + } +} diff --git a/src/cargo/sources/directory.rs b/src/cargo/sources/directory.rs new file mode 100644 index 000000000..bf20a270d --- /dev/null +++ b/src/cargo/sources/directory.rs @@ -0,0 +1,201 @@ +use std::collections::HashMap; +use std::fmt::{self, Debug, Formatter}; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; + +use hex; + +use serde_json; + +use core::{Dependency, Package, PackageId, Source, SourceId, Summary}; +use sources::PathSource; +use util::{Config, Sha256}; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; + +pub struct DirectorySource<'cfg> { + source_id: SourceId, + root: PathBuf, + packages: HashMap, + config: &'cfg Config, +} + +#[derive(Deserialize)] +struct Checksum { + package: Option, + files: HashMap, +} + +impl<'cfg> DirectorySource<'cfg> { + pub fn new(path: &Path, id: &SourceId, config: &'cfg Config) -> DirectorySource<'cfg> { + DirectorySource { + source_id: id.clone(), + root: path.to_path_buf(), + config, + packages: HashMap::new(), + } + } +} + +impl<'cfg> Debug for DirectorySource<'cfg> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "DirectorySource {{ root: {:?} }}", self.root) + } +} + +impl<'cfg> Source for DirectorySource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + let packages = self.packages.values().map(|p| &p.0); + let matches = packages.filter(|pkg| dep.matches(pkg.summary())); + for summary in matches.map(|pkg| pkg.summary().clone()) { + f(summary); + } + Ok(()) + } + + fn supports_checksums(&self) -> bool { + true + } + + fn requires_precise(&self) -> bool { + true + } + + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + self.packages.clear(); + let entries = self.root.read_dir().chain_err(|| { + format!( + "failed to read root of directory source: {}", + self.root.display() + ) + })?; + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + // Ignore hidden/dot directories as they typically don't contain + // crates and otherwise may conflict with a VCS + // (rust-lang/cargo#3414). 
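Stepping back, the `SourceConfigMap` machinery above is driven by `.cargo/config` entries such as the following (the mirror name and URLs are hypothetical); `load()` follows the `replace-with` chain and reports the errors shown earlier if the chain cycles or the replacement drops checksum support:

```toml
# .cargo/config
[source.crates-io]
replace-with = 'vendored'

[source.vendored]
directory = 'vendor'    # a `local-registry` or `git` source works here too

[source.my-mirror]
registry = 'https://mirror.example.com/crates.io-index'
```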
+            if let Some(s) = path.file_name().and_then(|s| s.to_str()) {
+                if s.starts_with('.') {
+                    continue;
+                }
+            }
+
+            // Vendor directories are often checked into a VCS, but throughout
+            // the lifetime of a vendor dir crates are often added and deleted.
+            // Some VCS implementations don't always fully delete the directory
+            // when a dir is removed from a different checkout. Sometimes a
+            // mostly-empty dir is left behind.
+            //
+            // Additionally vendor directories are sometimes accompanied with
+            // readme files and other auxiliary information not too interesting
+            // to Cargo.
+            //
+            // To help handle all this we only try processing folders with a
+            // `Cargo.toml` in them. This has the upside of being pretty
+            // flexible with the contents of vendor directories but has the
+            // downside of accidentally misconfigured vendor directories
+            // silently returning fewer crates.
+            if !path.join("Cargo.toml").exists() {
+                continue;
+            }
+
+            let mut src = PathSource::new(&path, &self.source_id, self.config);
+            src.update()?;
+            let pkg = src.root_package()?;
+
+            let cksum_file = path.join(".cargo-checksum.json");
+            let cksum = paths::read(&cksum_file).chain_err(|| {
+                format!(
+                    "failed to load checksum `.cargo-checksum.json` \
+                     of {} v{}",
+                    pkg.package_id().name(),
+                    pkg.package_id().version()
+                )
+            })?;
+            let cksum: Checksum = serde_json::from_str(&cksum).chain_err(|| {
+                format!(
+                    "failed to decode `.cargo-checksum.json` of \
+                     {} v{}",
+                    pkg.package_id().name(),
+                    pkg.package_id().version()
+                )
+            })?;
+
+            let mut manifest = pkg.manifest().clone();
+            let mut summary = manifest.summary().clone();
+            if let Some(ref package) = cksum.package {
+                summary = summary.set_checksum(package.clone());
+            }
+            manifest.set_summary(summary);
+            let pkg = Package::new(manifest, pkg.manifest_path());
+            self.packages.insert(pkg.package_id().clone(), (pkg, cksum));
+        }
+
+        Ok(())
+    }
+
+    fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
+        self.packages
+            .get(id)
+            .map(|p| &p.0)
+            .cloned()
+            .ok_or_else(|| format_err!("failed to find package with id: {}", id))
+    }
+
+    fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {
+        Ok(pkg.package_id().version().to_string())
+    }
+
+    fn verify(&self, id: &PackageId) -> CargoResult<()> {
+        let (pkg, cksum) = match self.packages.get(id) {
+            Some(&(ref pkg, ref cksum)) => (pkg, cksum),
+            None => bail!("failed to find entry for `{}` in directory source", id),
+        };
+
+        let mut buf = [0; 16 * 1024];
+        for (file, cksum) in cksum.files.iter() {
+            let mut h = Sha256::new();
+            let file = pkg.root().join(file);
+
+            (|| -> CargoResult<()> {
+                let mut f = File::open(&file)?;
+                loop {
+                    match f.read(&mut buf)?
{ + 0 => return Ok(()), + n => h.update(&buf[..n]), + } + } + })() + .chain_err(|| format!("failed to calculate checksum of: {}", file.display()))?; + + let actual = hex::encode(h.finish()); + if &*actual != cksum { + bail!( + "\ + the listed checksum of `{}` has changed:\n\ + expected: {}\n\ + actual: {}\n\ + \n\ + directory sources are not intended to be edited, if \ + modifications are required then it is recommended \ + that [replace] is used with a forked copy of the \ + source\ + ", + file.display(), + cksum, + actual + ); + } + } + + Ok(()) + } +} diff --git a/src/cargo/sources/git/mod.rs b/src/cargo/sources/git/mod.rs new file mode 100644 index 000000000..0b4378654 --- /dev/null +++ b/src/cargo/sources/git/mod.rs @@ -0,0 +1,4 @@ +pub use self::utils::{fetch, GitCheckout, GitDatabase, GitRemote, GitRevision}; +pub use self::source::{canonicalize_url, GitSource}; +mod utils; +mod source; diff --git a/src/cargo/sources/git/source.rs b/src/cargo/sources/git/source.rs new file mode 100644 index 000000000..fdf3e6082 --- /dev/null +++ b/src/cargo/sources/git/source.rs @@ -0,0 +1,278 @@ +use std::fmt::{self, Debug, Formatter}; + +use url::Url; + +use core::source::{Source, SourceId}; +use core::GitReference; +use core::{Dependency, Package, PackageId, Summary}; +use util::Config; +use util::errors::CargoResult; +use util::hex::short_hash; +use sources::PathSource; +use sources::git::utils::{GitRemote, GitRevision}; + +pub struct GitSource<'cfg> { + remote: GitRemote, + reference: GitReference, + source_id: SourceId, + path_source: Option>, + rev: Option, + ident: String, + config: &'cfg Config, +} + +impl<'cfg> GitSource<'cfg> { + pub fn new(source_id: &SourceId, config: &'cfg Config) -> CargoResult> { + assert!(source_id.is_git(), "id is not git, id={}", source_id); + + let remote = GitRemote::new(source_id.url()); + let ident = ident(source_id.url())?; + + let reference = match source_id.precise() { + Some(s) => GitReference::Rev(s.to_string()), + None => source_id.git_reference().unwrap().clone(), + }; + + let source = GitSource { + remote, + reference, + source_id: source_id.clone(), + path_source: None, + rev: None, + ident, + config, + }; + + Ok(source) + } + + pub fn url(&self) -> &Url { + self.remote.url() + } + + pub fn read_packages(&mut self) -> CargoResult> { + if self.path_source.is_none() { + self.update()?; + } + self.path_source.as_mut().unwrap().read_packages() + } +} + +fn ident(url: &Url) -> CargoResult { + let url = canonicalize_url(url)?; + let ident = url.path_segments() + .and_then(|mut s| s.next_back()) + .unwrap_or(""); + + let ident = if ident == "" { "_empty" } else { ident }; + + Ok(format!("{}-{}", ident, short_hash(&url))) +} + +// Some hacks and heuristics for making equivalent URLs hash the same +pub fn canonicalize_url(url: &Url) -> CargoResult { + let mut url = url.clone(); + + // cannot-be-a-base-urls are not supported + // eg. github.com:rust-lang-nursery/rustfmt.git + if url.cannot_be_a_base() { + bail!( + "invalid url `{}`: cannot-be-a-base-URLs are not supported", + url + ) + } + + // Strip a trailing slash + if url.path().ends_with('/') { + url.path_segments_mut().unwrap().pop_if_empty(); + } + + // HACKHACK: For GitHub URL's specifically just lowercase + // everything. GitHub treats both the same, but they hash + // differently, and we're gonna be hashing them. This wants a more + // general solution, and also we're almost certainly not using the + // same case conversion rules that GitHub does. 
(#84) + if url.host_str() == Some("github.com") { + url.set_scheme("https").unwrap(); + let path = url.path().to_lowercase(); + url.set_path(&path); + } + + // Repos generally can be accessed with or w/o '.git' + let needs_chopping = url.path().ends_with(".git"); + if needs_chopping { + let last = { + let last = url.path_segments().unwrap().next_back().unwrap(); + last[..last.len() - 4].to_owned() + }; + url.path_segments_mut().unwrap().pop().push(&last); + } + + Ok(url) +} + +impl<'cfg> Debug for GitSource<'cfg> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "git repo at {}", self.remote.url())?; + + match self.reference.pretty_ref() { + Some(s) => write!(f, " ({})", s), + None => Ok(()), + } + } +} + +impl<'cfg> Source for GitSource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + let src = self.path_source + .as_mut() + .expect("BUG: update() must be called before query()"); + src.query(dep, f) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + true + } + + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + let lock = + self.config + .git_path() + .open_rw(".cargo-lock-git", self.config, "the git checkouts")?; + + let db_path = lock.parent().join("db").join(&self.ident); + + if self.config.cli_unstable().offline && !db_path.exists() { + bail!( + "can't checkout from '{}': you are in the offline mode (-Z offline)", + self.remote.url() + ); + } + + // Resolve our reference to an actual revision, and check if the + // database already has that revision. If it does, we just load a + // database pinned at that revision, and if we don't we issue an update + // to try to find the revision. + let actual_rev = self.remote.rev_for(&db_path, &self.reference); + let should_update = actual_rev.is_err() || self.source_id.precise().is_none(); + + let (db, actual_rev) = if should_update && !self.config.cli_unstable().offline { + self.config.shell().status( + "Updating", + format!("git repository `{}`", self.remote.url()), + )?; + + trace!("updating git source `{:?}`", self.remote); + + self.remote + .checkout(&db_path, &self.reference, self.config)? + } else { + (self.remote.db_at(&db_path)?, actual_rev.unwrap()) + }; + + // Don’t use the full hash, + // to contribute less to reaching the path length limit on Windows: + // https://github.com/servo/servo/pull/14397 + let short_id = db.to_short_id(actual_rev.clone()).unwrap(); + + let checkout_path = lock.parent() + .join("checkouts") + .join(&self.ident) + .join(short_id.as_str()); + + // Copy the database to the checkout location. After this we could drop + // the lock on the database as we no longer needed it, but we leave it + // in scope so the destructors here won't tamper with too much. + // Checkout is immutable, so we don't need to protect it with a lock once + // it is created. 
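For context on `precise`: the revision recorded by `with_precise` just below is what ends up in `Cargo.lock` for a git dependency, as the fragment of the source URL. An illustrative (entirely hypothetical) lock file entry:

```toml
# Cargo.lock (illustrative)
[[package]]
name = "example"
version = "0.1.0"
source = "git+https://github.com/user/example#9f35b8e439eeeda9bc2dd6b8c3886d4ce5589d9f"
```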
+        db.copy_to(actual_rev.clone(), &checkout_path, self.config)?;
+
+        let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
+        let path_source = PathSource::new_recursive(&checkout_path, &source_id, self.config);
+
+        self.path_source = Some(path_source);
+        self.rev = Some(actual_rev);
+        self.path_source.as_mut().unwrap().update()
+    }
+
+    fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
+        trace!(
+            "getting packages for package id `{}` from `{:?}`",
+            id,
+            self.remote
+        );
+        self.path_source
+            .as_mut()
+            .expect("BUG: update() must be called before get()")
+            .download(id)
+    }
+
+    fn fingerprint(&self, _pkg: &Package) -> CargoResult<String> {
+        Ok(self.rev.as_ref().unwrap().to_string())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use url::Url;
+    use super::ident;
+    use util::ToUrl;
+
+    #[test]
+    pub fn test_url_to_path_ident_with_path() {
+        let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap();
+        assert!(ident.starts_with("cargo-"));
+    }
+
+    #[test]
+    pub fn test_url_to_path_ident_without_path() {
+        let ident = ident(&url("https://github.com")).unwrap();
+        assert!(ident.starts_with("_empty-"));
+    }
+
+    #[test]
+    fn test_canonicalize_idents_by_stripping_trailing_url_slash() {
+        let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap();
+        let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
+        assert_eq!(ident1, ident2);
+    }
+
+    #[test]
+    fn test_canonicalize_idents_by_lowercasing_github_urls() {
+        let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
+        let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap();
+        assert_eq!(ident1, ident2);
+    }
+
+    #[test]
+    fn test_canonicalize_idents_by_stripping_dot_git() {
+        let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
+        let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap();
+        assert_eq!(ident1, ident2);
+    }
+
+    #[test]
+    fn test_canonicalize_idents_different_protocols() {
+        let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
+        let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap();
+        assert_eq!(ident1, ident2);
+    }
+
+    #[test]
+    fn test_canonicalize_cannot_be_a_base_urls() {
+        assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err());
+        assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err());
+    }
+
+    fn url(s: &str) -> Url {
+        s.to_url().unwrap()
+    }
+}
diff --git a/src/cargo/sources/git/utils.rs b/src/cargo/sources/git/utils.rs
new file mode 100644
index 000000000..c6663342b
--- /dev/null
+++ b/src/cargo/sources/git/utils.rs
@@ -0,0 +1,870 @@
+use std::env;
+use std::fmt;
+use std::fs::{self, File};
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+use curl::easy::{Easy, List};
+use git2::{self, ObjectType};
+use serde::ser::{self, Serialize};
+use url::Url;
+
+use core::GitReference;
+use util::{internal, network, Config, Progress, ToUrl};
+use util::paths;
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+
+#[derive(PartialEq, Clone, Debug)]
+pub struct GitRevision(git2::Oid);
+
+impl ser::Serialize for GitRevision {
+    fn serialize<S: ser::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        serialize_str(self, s)
+    }
+}
+
+fn serialize_str<T, S>(t: &T, s: S) -> Result<S::Ok, S::Error>
+where
+    T: fmt::Display,
+    S: ser::Serializer,
+{
+    t.to_string().serialize(s)
+}
+
+impl fmt::Display for GitRevision {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        fmt::Display::fmt(&self.0, f)
+    }
+}
+
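As an aside, the database/checkout split used by `GitSource::update` earlier gives every repository a stable on-disk layout. A minimal sketch of how those paths compose (the root, ident, and short id values are all hypothetical):

```rust
use std::path::{Path, PathBuf};

// One bare database per repository ident, and one checkout per
// (ident, short revision) pair, mirroring the layout built above.
fn git_cache_paths(root: &Path, ident: &str, short_id: &str) -> (PathBuf, PathBuf) {
    let db = root.join("db").join(ident);
    let checkout = root.join("checkouts").join(ident).join(short_id);
    (db, checkout)
}

fn main() {
    // Hypothetical values: `ident` is "{repo-name}-{short_hash}" as produced
    // by `ident()`, and `short_id` comes from `GitDatabase::to_short_id`.
    let root = Path::new("/home/user/.cargo/git");
    let (db, checkout) = git_cache_paths(root, "cargo-0155bdbe83a8d2ee", "5588dfa");
    println!("database: {}", db.display());
    println!("checkout: {}", checkout.display());
}
```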
+pub struct GitShortID(git2::Buf); + +impl GitShortID { + pub fn as_str(&self) -> &str { + self.0.as_str().unwrap() + } +} + +/// `GitRemote` represents a remote repository. It gets cloned into a local +/// `GitDatabase`. +#[derive(PartialEq, Clone, Debug, Serialize)] +pub struct GitRemote { + #[serde(serialize_with = "serialize_str")] + url: Url, +} + +/// `GitDatabase` is a local clone of a remote repository's database. Multiple +/// `GitCheckouts` can be cloned from this `GitDatabase`. +#[derive(Serialize)] +pub struct GitDatabase { + remote: GitRemote, + path: PathBuf, + #[serde(skip_serializing)] + repo: git2::Repository, +} + +/// `GitCheckout` is a local checkout of a particular revision. Calling +/// `clone_into` with a reference will resolve the reference into a revision, +/// and return a `CargoError` if no revision for that reference was found. +#[derive(Serialize)] +pub struct GitCheckout<'a> { + database: &'a GitDatabase, + location: PathBuf, + revision: GitRevision, + #[serde(skip_serializing)] + repo: git2::Repository, +} + +// Implementations + +impl GitRemote { + pub fn new(url: &Url) -> GitRemote { + GitRemote { url: url.clone() } + } + + pub fn url(&self) -> &Url { + &self.url + } + + pub fn rev_for(&self, path: &Path, reference: &GitReference) -> CargoResult { + reference.resolve(&self.db_at(path)?.repo) + } + + pub fn checkout( + &self, + into: &Path, + reference: &GitReference, + cargo_config: &Config, + ) -> CargoResult<(GitDatabase, GitRevision)> { + let mut repo_and_rev = None; + if let Ok(mut repo) = git2::Repository::open(into) { + self.fetch_into(&mut repo, cargo_config) + .chain_err(|| format!("failed to fetch into {}", into.display()))?; + if let Ok(rev) = reference.resolve(&repo) { + repo_and_rev = Some((repo, rev)); + } + } + let (repo, rev) = match repo_and_rev { + Some(pair) => pair, + None => { + let repo = self.clone_into(into, cargo_config) + .chain_err(|| format!("failed to clone into: {}", into.display()))?; + let rev = reference.resolve(&repo)?; + (repo, rev) + } + }; + + Ok(( + GitDatabase { + remote: self.clone(), + path: into.to_path_buf(), + repo, + }, + rev, + )) + } + + pub fn db_at(&self, db_path: &Path) -> CargoResult { + let repo = git2::Repository::open(db_path)?; + Ok(GitDatabase { + remote: self.clone(), + path: db_path.to_path_buf(), + repo, + }) + } + + fn fetch_into(&self, dst: &mut git2::Repository, cargo_config: &Config) -> CargoResult<()> { + // Create a local anonymous remote in the repository to fetch the url + let refspec = "refs/heads/*:refs/heads/*"; + fetch(dst, &self.url, refspec, cargo_config) + } + + fn clone_into(&self, dst: &Path, cargo_config: &Config) -> CargoResult { + if fs::metadata(&dst).is_ok() { + paths::remove_dir_all(dst)?; + } + fs::create_dir_all(dst)?; + let mut repo = git2::Repository::init_bare(dst)?; + fetch( + &mut repo, + &self.url, + "refs/heads/*:refs/heads/*", + cargo_config, + )?; + Ok(repo) + } +} + +impl GitDatabase { + pub fn copy_to( + &self, + rev: GitRevision, + dest: &Path, + cargo_config: &Config, + ) -> CargoResult { + let mut checkout = None; + if let Ok(repo) = git2::Repository::open(dest) { + let mut co = GitCheckout::new(dest, self, rev.clone(), repo); + if !co.is_fresh() { + // After a successful fetch operation do a sanity check to + // ensure we've got the object in our database to reset to. This + // can fail sometimes for corrupt repositories where the fetch + // operation succeeds but the object isn't actually there. 
+ co.fetch(cargo_config)?; + if co.has_object() { + co.reset(cargo_config)?; + assert!(co.is_fresh()); + checkout = Some(co); + } + } else { + checkout = Some(co); + } + }; + let checkout = match checkout { + Some(c) => c, + None => GitCheckout::clone_into(dest, self, rev, cargo_config)?, + }; + checkout.update_submodules(cargo_config)?; + Ok(checkout) + } + + pub fn to_short_id(&self, revision: GitRevision) -> CargoResult { + let obj = self.repo.find_object(revision.0, None)?; + Ok(GitShortID(obj.short_id()?)) + } + + pub fn has_ref(&self, reference: &str) -> CargoResult<()> { + self.repo.revparse_single(reference)?; + Ok(()) + } +} + +impl GitReference { + fn resolve(&self, repo: &git2::Repository) -> CargoResult { + let id = match *self { + GitReference::Tag(ref s) => (|| -> CargoResult { + let refname = format!("refs/tags/{}", s); + let id = repo.refname_to_id(&refname)?; + let obj = repo.find_object(id, None)?; + let obj = obj.peel(ObjectType::Commit)?; + Ok(obj.id()) + })() + .chain_err(|| format!("failed to find tag `{}`", s))?, + GitReference::Branch(ref s) => { + let b = repo.find_branch(s, git2::BranchType::Local) + .chain_err(|| format!("failed to find branch `{}`", s))?; + b.get() + .target() + .ok_or_else(|| format_err!("branch `{}` did not have a target", s))? + } + GitReference::Rev(ref s) => { + let obj = repo.revparse_single(s)?; + match obj.as_tag() { + Some(tag) => tag.target_id(), + None => obj.id(), + } + } + }; + Ok(GitRevision(id)) + } +} + +impl<'a> GitCheckout<'a> { + fn new( + path: &Path, + database: &'a GitDatabase, + revision: GitRevision, + repo: git2::Repository, + ) -> GitCheckout<'a> { + GitCheckout { + location: path.to_path_buf(), + database, + revision, + repo, + } + } + + fn clone_into( + into: &Path, + database: &'a GitDatabase, + revision: GitRevision, + config: &Config, + ) -> CargoResult> { + let dirname = into.parent().unwrap(); + fs::create_dir_all(&dirname).chain_err(|| format!("Couldn't mkdir {}", dirname.display()))?; + if into.exists() { + paths::remove_dir_all(into)?; + } + + // we're doing a local filesystem-to-filesystem clone so there should + // be no need to respect global configuration options, so pass in + // an empty instance of `git2::Config` below. + let git_config = git2::Config::new()?; + + // Clone the repository, but make sure we use the "local" option in + // libgit2 which will attempt to use hardlinks to set up the database. + // This should speed up the clone operation quite a bit if it works. + // + // Note that we still use the same fetch options because while we don't + // need authentication information we may want progress bars and such. + let url = database.path.to_url()?; + let mut repo = None; + with_fetch_options(&git_config, &url, config, &mut |fopts| { + let mut checkout = git2::build::CheckoutBuilder::new(); + checkout.dry_run(); // we'll do this below during a `reset` + + let r = git2::build::RepoBuilder::new() + // use hard links and/or copy the database, we're doing a + // filesystem clone so this'll speed things up quite a bit. 
+ .clone_local(git2::build::CloneLocal::Local) + .with_checkout(checkout) + .fetch_options(fopts) + // .remote_create(|repo, _name, url| repo.remote_anonymous(url)) + .clone(url.as_str(), into)?; + repo = Some(r); + Ok(()) + })?; + let repo = repo.unwrap(); + + let checkout = GitCheckout::new(into, database, revision, repo); + checkout.reset(config)?; + Ok(checkout) + } + + fn is_fresh(&self) -> bool { + match self.repo.revparse_single("HEAD") { + Ok(ref head) if head.id() == self.revision.0 => { + // See comments in reset() for why we check this + self.location.join(".cargo-ok").exists() + } + _ => false, + } + } + + fn fetch(&mut self, cargo_config: &Config) -> CargoResult<()> { + info!("fetch {}", self.repo.path().display()); + let url = self.database.path.to_url()?; + let refspec = "refs/heads/*:refs/heads/*"; + fetch(&mut self.repo, &url, refspec, cargo_config)?; + Ok(()) + } + + fn has_object(&self) -> bool { + self.repo.find_object(self.revision.0, None).is_ok() + } + + fn reset(&self, config: &Config) -> CargoResult<()> { + // If we're interrupted while performing this reset (e.g. we die because + // of a signal) Cargo needs to be sure to try to check out this repo + // again on the next go-round. + // + // To enable this we have a dummy file in our checkout, .cargo-ok, which + // if present means that the repo has been successfully reset and is + // ready to go. Hence if we start to do a reset, we make sure this file + // *doesn't* exist, and then once we're done we create the file. + let ok_file = self.location.join(".cargo-ok"); + let _ = paths::remove_file(&ok_file); + info!("reset {} to {}", self.repo.path().display(), self.revision); + let object = self.repo.find_object(self.revision.0, None)?; + reset(&self.repo, &object, config)?; + File::create(ok_file)?; + Ok(()) + } + + fn update_submodules(&self, cargo_config: &Config) -> CargoResult<()> { + return update_submodules(&self.repo, cargo_config); + + fn update_submodules(repo: &git2::Repository, cargo_config: &Config) -> CargoResult<()> { + info!("update submodules for: {:?}", repo.workdir().unwrap()); + + for mut child in repo.submodules()? { + update_submodule(repo, &mut child, cargo_config).chain_err(|| { + format!( + "failed to update submodule `{}`", + child.name().unwrap_or("") + ) + })?; + } + Ok(()) + } + + fn update_submodule( + parent: &git2::Repository, + child: &mut git2::Submodule, + cargo_config: &Config, + ) -> CargoResult<()> { + child.init(false)?; + let url = child + .url() + .ok_or_else(|| internal("non-utf8 url for submodule"))?; + + // A submodule which is listed in .gitmodules but not actually + // checked out will not have a head id, so we should ignore it. + let head = match child.head_id() { + Some(head) => head, + None => return Ok(()), + }; + + // If the submodule hasn't been checked out yet, we need to + // clone it. If it has been checked out and the head is the same + // as the submodule's head, then we can skip an update and keep + // recursing. + let head_and_repo = child.open().and_then(|repo| { + let target = repo.head()?.target(); + Ok((target, repo)) + }); + let mut repo = match head_and_repo { + Ok((head, repo)) => { + if child.head_id() == head { + return update_submodules(&repo, cargo_config); + } + repo + } + Err(..) => { + let path = parent.workdir().unwrap().join(child.path()); + let _ = paths::remove_dir_all(&path); + git2::Repository::init(&path)? 
+ } + }; + + // Fetch data from origin and reset to the head commit + let refspec = "refs/heads/*:refs/heads/*"; + let url = url.to_url()?; + fetch(&mut repo, &url, refspec, cargo_config).chain_err(|| { + internal(format!( + "failed to fetch submodule `{}` from {}", + child.name().unwrap_or(""), + url + )) + })?; + + let obj = repo.find_object(head, None)?; + reset(&repo, &obj, cargo_config)?; + update_submodules(&repo, cargo_config) + } + } +} + +/// Prepare the authentication callbacks for cloning a git repository. +/// +/// The main purpose of this function is to construct the "authentication +/// callback" which is used to clone a repository. This callback will attempt to +/// find the right authentication on the system (without user input) and will +/// guide libgit2 in doing so. +/// +/// The callback is provided `allowed` types of credentials, and we try to do as +/// much as possible based on that: +/// +/// * Prioritize SSH keys from the local ssh agent as they're likely the most +/// reliable. The username here is prioritized from the credential +/// callback, then from whatever is configured in git itself, and finally +/// we fall back to the generic user of `git`. +/// +/// * If a username/password is allowed, then we fallback to git2-rs's +/// implementation of the credential helper. This is what is configured +/// with `credential.helper` in git, and is the interface for the OSX +/// keychain, for example. +/// +/// * After the above two have failed, we just kinda grapple attempting to +/// return *something*. +/// +/// If any form of authentication fails, libgit2 will repeatedly ask us for +/// credentials until we give it a reason to not do so. To ensure we don't +/// just sit here looping forever we keep track of authentications we've +/// attempted and we don't try the same ones again. +fn with_authentication(url: &str, cfg: &git2::Config, mut f: F) -> CargoResult +where + F: FnMut(&mut git2::Credentials) -> CargoResult, +{ + let mut cred_helper = git2::CredentialHelper::new(url); + cred_helper.config(cfg); + + let mut ssh_username_requested = false; + let mut cred_helper_bad = None; + let mut ssh_agent_attempts = Vec::new(); + let mut any_attempts = false; + let mut tried_sshkey = false; + + let mut res = f(&mut |url, username, allowed| { + any_attempts = true; + // libgit2's "USERNAME" authentication actually means that it's just + // asking us for a username to keep going. This is currently only really + // used for SSH authentication and isn't really an authentication type. + // The logic currently looks like: + // + // let user = ...; + // if (user.is_null()) + // user = callback(USERNAME, null, ...); + // + // callback(SSH_KEY, user, ...) + // + // So if we're being called here then we know that (a) we're using ssh + // authentication and (b) no username was specified in the URL that + // we're trying to clone. We need to guess an appropriate username here, + // but that may involve a few attempts. Unfortunately we can't switch + // usernames during one authentication session with libgit2, so to + // handle this we bail out of this authentication session after setting + // the flag `ssh_username_requested`, and then we handle this below. + if allowed.contains(git2::CredentialType::USERNAME) { + debug_assert!(username.is_none()); + ssh_username_requested = true; + return Err(git2::Error::from_str("gonna try usernames later")); + } + + // An "SSH_KEY" authentication indicates that we need some sort of SSH + // authentication. 
This can currently either come from the ssh-agent + // process or from a raw in-memory SSH key. Cargo only supports using + // ssh-agent currently. + // + // If we get called with this then the only way that should be possible + // is if a username is specified in the URL itself (e.g. `username` is + // Some), hence the unwrap() here. We try custom usernames down below. + if allowed.contains(git2::CredentialType::SSH_KEY) && !tried_sshkey { + // If ssh-agent authentication fails, libgit2 will keep + // calling this callback asking for other authentication + // methods to try. Make sure we only try ssh-agent once, + // to avoid looping forever. + tried_sshkey = true; + let username = username.unwrap(); + debug_assert!(!ssh_username_requested); + ssh_agent_attempts.push(username.to_string()); + return git2::Cred::ssh_key_from_agent(username); + } + + // Sometimes libgit2 will ask for a username/password in plaintext. This + // is where Cargo would have an interactive prompt if we supported it, + // but we currently don't! Right now the only way we support fetching a + // plaintext password is through the `credential.helper` support, so + // fetch that here. + if allowed.contains(git2::CredentialType::USER_PASS_PLAINTEXT) { + let r = git2::Cred::credential_helper(cfg, url, username); + cred_helper_bad = Some(r.is_err()); + return r; + } + + // I'm... not sure what the DEFAULT kind of authentication is, but seems + // easy to support? + if allowed.contains(git2::CredentialType::DEFAULT) { + return git2::Cred::default(); + } + + // Whelp, we tried our best + Err(git2::Error::from_str("no authentication available")) + }); + + // Ok, so if it looks like we're going to be doing ssh authentication, we + // want to try a few different usernames as one wasn't specified in the URL + // for us to use. In order, we'll try: + // + // * A credential helper's username for this URL, if available. + // * This account's username. + // * "git" + // + // We have to restart the authentication session each time (due to + // constraints in libssh2 I guess? maybe this is inherent to ssh?), so we + // call our callback, `f`, in a loop here. + if ssh_username_requested { + debug_assert!(res.is_err()); + let mut attempts = Vec::new(); + attempts.push("git".to_string()); + if let Ok(s) = env::var("USER").or_else(|_| env::var("USERNAME")) { + attempts.push(s); + } + if let Some(ref s) = cred_helper.username { + attempts.push(s.clone()); + } + + while let Some(s) = attempts.pop() { + // We should get `USERNAME` first, where we just return our attempt, + // and then after that we should get `SSH_KEY`. If the first attempt + // fails we'll get called again, but we don't have another option so + // we bail out. + let mut attempts = 0; + res = f(&mut |_url, username, allowed| { + if allowed.contains(git2::CredentialType::USERNAME) { + return git2::Cred::username(&s); + } + if allowed.contains(git2::CredentialType::SSH_KEY) { + debug_assert_eq!(Some(&s[..]), username); + attempts += 1; + if attempts == 1 { + ssh_agent_attempts.push(s.to_string()); + return git2::Cred::ssh_key_from_agent(&s); + } + } + Err(git2::Error::from_str("no authentication available")) + }); + + // If we made two attempts then that means: + // + // 1. A username was requested, we returned `s`. + // 2. An ssh key was requested, we returned to look up `s` in the + // ssh agent. + // 3. For whatever reason that lookup failed, so we were asked again + // for another mode of authentication. 
+            //
+            // Essentially, if `attempts == 2` then in theory the only error was
+            // that this username failed to authenticate (e.g. no other network
+            // errors happened). Otherwise something else is funny so we bail
+            // out.
+            if attempts != 2 {
+                break;
+            }
+        }
+    }
+
+    if res.is_ok() || !any_attempts {
+        return res.map_err(From::from);
+    }
+
+    // In the case of an authentication failure (where we tried something) then
+    // we try to give a more helpful error message about precisely what we
+    // tried.
+    let res = res.map_err(CargoError::from).chain_err(|| {
+        let mut msg = "failed to authenticate when downloading \
+                       repository"
+            .to_string();
+        if !ssh_agent_attempts.is_empty() {
+            let names = ssh_agent_attempts
+                .iter()
+                .map(|s| format!("`{}`", s))
+                .collect::<Vec<_>>()
+                .join(", ");
+            msg.push_str(&format!(
+                "\nattempted ssh-agent authentication, but \
+                 none of the usernames {} succeeded",
+                names
+            ));
+        }
+        if let Some(failed_cred_helper) = cred_helper_bad {
+            if failed_cred_helper {
+                msg.push_str(
+                    "\nattempted to find username/password via \
+                     git's `credential.helper` support, but failed",
+                );
+            } else {
+                msg.push_str(
+                    "\nattempted to find username/password via \
+                     `credential.helper`, but maybe the found \
+                     credentials were incorrect",
+                );
+            }
+        }
+        msg
+    })?;
+    Ok(res)
+}
+
+fn reset(repo: &git2::Repository, obj: &git2::Object, config: &Config) -> CargoResult<()> {
+    let mut pb = Progress::new("Checkout", config);
+    let mut opts = git2::build::CheckoutBuilder::new();
+    opts.progress(|_, cur, max| {
+        drop(pb.tick(cur, max));
+    });
+    repo.reset(obj, git2::ResetType::Hard, Some(&mut opts))?;
+    Ok(())
+}
+
+pub fn with_fetch_options(
+    git_config: &git2::Config,
+    url: &Url,
+    config: &Config,
+    cb: &mut FnMut(git2::FetchOptions) -> CargoResult<()>,
+) -> CargoResult<()> {
+    let mut progress = Progress::new("Fetch", config);
+    network::with_retry(config, || {
+        with_authentication(url.as_str(), git_config, |f| {
+            let mut rcb = git2::RemoteCallbacks::new();
+            rcb.credentials(f);
+
+            rcb.transfer_progress(|stats| {
+                progress
+                    .tick(stats.indexed_objects(), stats.total_objects())
+                    .is_ok()
+            });
+
+            // Create a local anonymous remote in the repository to fetch the
+            // url
+            let mut opts = git2::FetchOptions::new();
+            opts.remote_callbacks(rcb)
+                .download_tags(git2::AutotagOption::All);
+            cb(opts)
+        })?;
+        Ok(())
+    })
+}
+
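A small, self-contained sketch of the ssh username fallback order that `with_authentication` above works through (the attempt list is popped from the back, so the credential helper's suggestion is tried first, then the environment, then plain `git`):

```rust
use std::env;

// Build the ssh username candidates in the order described above.
fn username_attempts(cred_helper_username: Option<String>) -> Vec<String> {
    let mut attempts = vec!["git".to_string()];
    if let Ok(s) = env::var("USER").or_else(|_| env::var("USERNAME")) {
        attempts.push(s);
    }
    if let Some(s) = cred_helper_username {
        attempts.push(s);
    }
    attempts
}

fn main() {
    // Hypothetical: a credential helper suggested "alice".
    let mut attempts = username_attempts(Some("alice".to_string()));
    while let Some(name) = attempts.pop() {
        println!("would try ssh authentication as `{}`", name);
    }
}
```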
+pub fn fetch(
+    repo: &mut git2::Repository,
+    url: &Url,
+    refspec: &str,
+    config: &Config,
+) -> CargoResult<()> {
+    if config.frozen() {
+        bail!(
+            "attempting to update a git repository, but --frozen \
+             was specified"
+        )
+    }
+    if !config.network_allowed() {
+        bail!("can't update a git repository in the offline mode")
+    }
+
+    // If we're fetching from GitHub, attempt GitHub's special fast path for
+    // testing if we've already got an up-to-date copy of the repository
+    if url.host_str() == Some("github.com") {
+        if let Ok(oid) = repo.refname_to_id("refs/remotes/origin/master") {
+            let mut handle = config.http()?.borrow_mut();
+            debug!("attempting GitHub fast path for {}", url);
+            if github_up_to_date(&mut handle, url, &oid) {
+                return Ok(());
+            } else {
+                debug!("fast path failed, falling back to a git fetch");
+            }
+        }
+    }
+
+    // We reuse repositories quite a lot, so before we go through and update
+    // the repo, check to see if it's a little too old and could benefit from
+    // a gc. In theory this shouldn't be too expensive compared to the network
+    // request we're about to issue.
+    maybe_gc_repo(repo)?;
+
+    debug!("doing a fetch for {}", url);
+    let git_config = git2::Config::open_default()?;
+    with_fetch_options(&git_config, url, config, &mut |mut opts| {
+        // The `fetch` operation here may fail spuriously due to a corrupt
+        // repository. It could also fail, however, for a whole slew of other
+        // reasons (aka network related reasons). We want Cargo to automatically
+        // recover from corrupt repositories, but we don't want Cargo to stomp
+        // over other legitimate errors.
+        //
+        // Consequently we save off the error of the `fetch` operation and if it
+        // looks like a "corrupt repo" error then we blow away the repo and try
+        // again. If it looks like any other kind of error, or if we've already
+        // blown away the repository, then we want to return the error as-is.
+        let mut repo_reinitialized = false;
+        loop {
+            debug!("initiating fetch of {} from {}", refspec, url);
+            let res = repo.remote_anonymous(url.as_str())?
+                .fetch(&[refspec], Some(&mut opts), None);
+            let err = match res {
+                Ok(()) => break,
+                Err(e) => e,
+            };
+            debug!("fetch failed: {}", err);
+
+            if !repo_reinitialized && err.class() == git2::ErrorClass::Reference {
+                repo_reinitialized = true;
+                debug!(
+                    "looks like this is a corrupt repository, reinitializing \
+                     and trying again"
+                );
+                if reinitialize(repo).is_ok() {
+                    continue;
+                }
+            }
+
+            return Err(err.into());
+        }
+        Ok(())
+    })
+}
+
+/// Cargo has a bunch of long-lived git repositories in its global cache and
+/// some, like the index, are updated very frequently. Right now each update
+/// creates a new "pack file" inside the git database, and over time this can
+/// cause bad performance and bad behavior in libgit2.
+///
+/// One pathological use case today is where libgit2 opens hundreds of file
+/// descriptors, getting us dangerously close to blowing out the OS limits of
+/// how many fds we can have open. This is detailed in #4403.
+///
+/// To try to combat this problem we attempt a `git gc` here. Note, though, that
+/// we may not even have `git` installed on the system! As a result we
+/// opportunistically try a `git gc` when the pack directory looks too big, and
+/// failing that we just blow away the repository and start over.
+fn maybe_gc_repo(repo: &mut git2::Repository) -> CargoResult<()> {
+    // Here we arbitrarily declare that if you have more than 100 files in your
+    // `pack` folder that we need to do a gc.
+    let entries = match repo.path().join("objects/pack").read_dir() {
+        Ok(e) => e.count(),
+        Err(_) => {
+            debug!("skipping gc as pack dir appears gone");
+            return Ok(());
+        }
+    };
+    let max = env::var("__CARGO_PACKFILE_LIMIT")
+        .ok()
+        .and_then(|s| s.parse::<usize>().ok())
+        .unwrap_or(100);
+    if entries < max {
+        debug!("skipping gc as there's only {} pack files", entries);
+        return Ok(());
+    }
+
+    // First up, try a literal `git gc` by shelling out to git. This is pretty
+    // likely to fail though as we may not have `git` installed. Note that
+    // libgit2 doesn't currently implement the gc operation, so there's no
+    // equivalent there.
+ match Command::new("git") + .arg("gc") + .current_dir(repo.path()) + .output() + { + Ok(out) => { + debug!( + "git-gc status: {}\n\nstdout ---\n{}\nstderr ---\n{}", + out.status, + String::from_utf8_lossy(&out.stdout), + String::from_utf8_lossy(&out.stderr) + ); + if out.status.success() { + let new = git2::Repository::open(repo.path())?; + mem::replace(repo, new); + return Ok(()); + } + } + Err(e) => debug!("git-gc failed to spawn: {}", e), + } + + // Alright all else failed, let's start over. + reinitialize(repo) +} + +fn reinitialize(repo: &mut git2::Repository) -> CargoResult<()> { + // Here we want to drop the current repository object pointed to by `repo`, + // so we initialize temporary repository in a sub-folder, blow away the + // existing git folder, and then recreate the git repo. Finally we blow away + // the `tmp` folder we allocated. + let path = repo.path().to_path_buf(); + debug!("reinitializing git repo at {:?}", path); + let tmp = path.join("tmp"); + let bare = !repo.path().ends_with(".git"); + *repo = git2::Repository::init(&tmp)?; + for entry in path.read_dir()? { + let entry = entry?; + if entry.file_name().to_str() == Some("tmp") { + continue; + } + let path = entry.path(); + drop(paths::remove_file(&path).or_else(|_| paths::remove_dir_all(&path))); + } + if bare { + *repo = git2::Repository::init_bare(path)?; + } else { + *repo = git2::Repository::init(path)?; + } + paths::remove_dir_all(&tmp)?; + Ok(()) +} + +/// Updating the index is done pretty regularly so we want it to be as fast as +/// possible. For registries hosted on GitHub (like the crates.io index) there's +/// a fast path available to use [1] to tell us that there's no updates to be +/// made. +/// +/// This function will attempt to hit that fast path and verify that the `oid` +/// is actually the current `master` branch of the repository. If `true` is +/// returned then no update needs to be performed, but if `false` is returned +/// then the standard update logic still needs to happen. +/// +/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference +/// +/// Note that this function should never cause an actual failure because it's +/// just a fast path. As a result all errors are ignored in this function and we +/// just return a `bool`. Any real errors will be reported through the normal +/// update path above. +fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool { + macro_rules! 
try { + ($e:expr) => (match $e { + Some(e) => e, + None => return false, + }) + } + + // This expects GitHub urls in the form `github.com/user/repo` and nothing + // else + let mut pieces = try!(url.path_segments()); + let username = try!(pieces.next()); + let repo = try!(pieces.next()); + if pieces.next().is_some() { + return false; + } + + let url = format!( + "https://api.github.com/repos/{}/{}/commits/master", + username, repo + ); + try!(handle.get(true).ok()); + try!(handle.url(&url).ok()); + try!(handle.useragent("cargo").ok()); + let mut headers = List::new(); + try!(headers.append("Accept: application/vnd.github.3.sha").ok()); + try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok()); + try!(handle.http_headers(headers).ok()); + try!(handle.perform().ok()); + + try!(handle.response_code().ok()) == 304 +} diff --git a/src/cargo/sources/mod.rs b/src/cargo/sources/mod.rs new file mode 100644 index 000000000..ed784e95a --- /dev/null +++ b/src/cargo/sources/mod.rs @@ -0,0 +1,13 @@ +pub use self::config::SourceConfigMap; +pub use self::directory::DirectorySource; +pub use self::git::GitSource; +pub use self::path::PathSource; +pub use self::registry::{RegistrySource, CRATES_IO}; +pub use self::replaced::ReplacedSource; + +pub mod config; +pub mod directory; +pub mod git; +pub mod path; +pub mod registry; +pub mod replaced; diff --git a/src/cargo/sources/path.rs b/src/cargo/sources/path.rs new file mode 100644 index 000000000..c7a0fdf75 --- /dev/null +++ b/src/cargo/sources/path.rs @@ -0,0 +1,541 @@ +use std::fmt::{self, Debug, Formatter}; +use std::fs; +use std::path::{Path, PathBuf}; + +use filetime::FileTime; +use git2; +use glob::Pattern; +use ignore::Match; +use ignore::gitignore::GitignoreBuilder; + +use core::{Dependency, Package, PackageId, Source, SourceId, Summary}; +use ops; +use util::{self, internal, CargoResult}; +use util::paths; +use util::Config; + +pub struct PathSource<'cfg> { + source_id: SourceId, + path: PathBuf, + updated: bool, + packages: Vec, + config: &'cfg Config, + recursive: bool, +} + +impl<'cfg> PathSource<'cfg> { + /// Invoked with an absolute path to a directory that contains a Cargo.toml. + /// + /// This source will only return the package at precisely the `path` + /// specified, and it will be an error if there's not a package at `path`. + pub fn new(path: &Path, id: &SourceId, config: &'cfg Config) -> PathSource<'cfg> { + PathSource { + source_id: id.clone(), + path: path.to_path_buf(), + updated: false, + packages: Vec::new(), + config, + recursive: false, + } + } + + /// Creates a new source which is walked recursively to discover packages. + /// + /// This is similar to the `new` method except that instead of requiring a + /// valid package to be present at `root` the folder is walked entirely to + /// crawl for packages. + /// + /// Note that this should be used with care and likely shouldn't be chosen + /// by default! 
+    pub fn new_recursive(root: &Path, id: &SourceId, config: &'cfg Config) -> PathSource<'cfg> {
+        PathSource {
+            recursive: true,
+            ..PathSource::new(root, id, config)
+        }
+    }
+
+    pub fn preload_with(&mut self, pkg: Package) {
+        assert!(!self.updated);
+        assert!(!self.recursive);
+        assert!(self.packages.is_empty());
+        self.updated = true;
+        self.packages.push(pkg);
+    }
+
+    pub fn root_package(&mut self) -> CargoResult<Package> {
+        trace!("root_package; source={:?}", self);
+
+        self.update()?;
+
+        match self.packages.iter().find(|p| p.root() == &*self.path) {
+            Some(pkg) => Ok(pkg.clone()),
+            None => Err(internal("no package found in source")),
+        }
+    }
+
+    pub fn read_packages(&self) -> CargoResult<Vec<Package>> {
+        if self.updated {
+            Ok(self.packages.clone())
+        } else if self.recursive {
+            ops::read_packages(&self.path, &self.source_id, self.config)
+        } else {
+            let path = self.path.join("Cargo.toml");
+            let (pkg, _) = ops::read_package(&path, &self.source_id, self.config)?;
+            Ok(vec![pkg])
+        }
+    }
+
+    /// List all files relevant to building this package inside this source.
+    ///
+    /// This function will use the appropriate methods to determine the
+    /// set of files underneath this source's directory which are relevant for
+    /// building `pkg`.
+    ///
+    /// The basic assumption of this method is that all files in the directory
+    /// are relevant for building this package, but it also contains logic to
+    /// use other methods like .gitignore to filter the list of files.
+    ///
+    /// ## Pattern matching strategy
+    ///
+    /// Migrating from a glob-like pattern matching (using `glob` crate) to a
+    /// gitignore-like pattern matching (using `ignore` crate). The migration
+    /// stages are:
+    ///
+    /// 1) Only warn users about the future change iff their matching rules are
+    ///    affected. (CURRENT STAGE)
+    ///
+    /// 2) Switch to the new strategy and update documents. Still keep warning
+    ///    affected users.
+    ///
+    /// 3) Drop the old strategy and no more warnings.
+    ///
    /// See <https://github.com/rust-lang/cargo/issues/4268> for more info.
+    pub fn list_files(&self, pkg: &Package) -> CargoResult<Vec<PathBuf>> {
+        let root = pkg.root();
+        let no_include_option = pkg.manifest().include().is_empty();
+
+        // glob-like matching rules
+
+        let glob_parse = |p: &String| {
+            let pattern: &str = if p.starts_with('/') {
+                &p[1..p.len()]
+            } else {
+                p
+            };
+            Pattern::new(pattern)
+                .map_err(|e| format_err!("could not parse glob pattern `{}`: {}", p, e))
+        };
+
+        let glob_exclude = pkg.manifest()
+            .exclude()
+            .iter()
+            .map(|p| glob_parse(p))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let glob_include = pkg.manifest()
+            .include()
+            .iter()
+            .map(|p| glob_parse(p))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let glob_should_package = |relative_path: &Path| -> bool {
+            fn glob_match(patterns: &Vec<Pattern>, relative_path: &Path) -> bool {
+                patterns
+                    .iter()
+                    .any(|pattern| pattern.matches_path(relative_path))
+            }
+
+            // include and exclude options are mutually exclusive.
+ if no_include_option { + !glob_match(&glob_exclude, relative_path) + } else { + glob_match(&glob_include, relative_path) + } + }; + + // ignore-like matching rules + + let mut exclude_builder = GitignoreBuilder::new(root); + for rule in pkg.manifest().exclude() { + exclude_builder.add_line(None, rule)?; + } + let ignore_exclude = exclude_builder.build()?; + + let mut include_builder = GitignoreBuilder::new(root); + for rule in pkg.manifest().include() { + include_builder.add_line(None, rule)?; + } + let ignore_include = include_builder.build()?; + + let ignore_should_package = |relative_path: &Path| -> CargoResult { + // include and exclude options are mutually exclusive. + if no_include_option { + match ignore_exclude + .matched_path_or_any_parents(relative_path, /* is_dir */ false) + { + Match::None => Ok(true), + Match::Ignore(_) => Ok(false), + Match::Whitelist(pattern) => Err(format_err!( + "exclude rules cannot start with `!`: {}", + pattern.original() + )), + } + } else { + match ignore_include + .matched_path_or_any_parents(relative_path, /* is_dir */ false) + { + Match::None => Ok(false), + Match::Ignore(_) => Ok(true), + Match::Whitelist(pattern) => Err(format_err!( + "include rules cannot start with `!`: {}", + pattern.original() + )), + } + } + }; + + // matching to paths + + let mut filter = |path: &Path| -> CargoResult { + let relative_path = util::without_prefix(path, root).unwrap(); + let glob_should_package = glob_should_package(relative_path); + let ignore_should_package = ignore_should_package(relative_path)?; + + if glob_should_package != ignore_should_package { + if glob_should_package { + if no_include_option { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL be excluded in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } else { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL NOT be included in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } + } else if no_include_option { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL NOT be excluded in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } else { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL be included in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } + } + + // Update to ignore_should_package for Stage 2 + Ok(glob_should_package) + }; + + // attempt git-prepopulate only if no `include` (rust-lang/cargo#4135) + if no_include_option { + if let Some(result) = self.discover_git_and_list_files(pkg, root, &mut filter) { + return result; + } + } + self.list_files_walk(pkg, &mut filter) + } + + // Returns Some(_) if found sibling Cargo.toml and .git folder; + // otherwise caller should fall back on full file list. 
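+    // Sketch of the probe below (hypothetical layout): for a package at
+    // `/repo/crates/foo`, the walk visits `/repo/crates`, then `/repo`; if
+    // `/repo` holds both a `Cargo.toml` and a `.git` folder, and
+    // `crates/foo/Cargo.toml` is present in the git index, the git-based
+    // listing is used for the package.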
+    fn discover_git_and_list_files(
+        &self,
+        pkg: &Package,
+        root: &Path,
+        filter: &mut FnMut(&Path) -> CargoResult<bool>,
+    ) -> Option<CargoResult<Vec<PathBuf>>> {
+        // If this package is in a git repository, then we really do want to
+        // query the git repository as it takes into account items such as
+        // .gitignore. We're not quite sure where the git repository is,
+        // however, so we do a bit of a probe.
+        //
+        // We walk this package's path upwards and look for a sibling
+        // Cargo.toml and .git folder. If we find one then we assume that we're
+        // part of that repository.
+        let mut cur = root;
+        loop {
+            if cur.join("Cargo.toml").is_file() {
+                // If we find a git repository next to this Cargo.toml, we still
+                // check to see if we are indeed part of the index. If not, then
+                // this is likely an unrelated git repo, so keep going.
+                if let Ok(repo) = git2::Repository::open(cur) {
+                    let index = match repo.index() {
+                        Ok(index) => index,
+                        Err(err) => return Some(Err(err.into())),
+                    };
+                    let path = util::without_prefix(root, cur).unwrap().join("Cargo.toml");
+                    if index.get_path(&path, 0).is_some() {
+                        return Some(self.list_files_git(pkg, repo, filter));
+                    }
+                }
+            }
+            // don't cross submodule boundaries
+            if cur.join(".git").is_dir() {
+                break;
+            }
+            match cur.parent() {
+                Some(parent) => cur = parent,
+                None => break,
+            }
+        }
+        None
+    }
+
+    fn list_files_git(
+        &self,
+        pkg: &Package,
+        repo: git2::Repository,
+        filter: &mut FnMut(&Path) -> CargoResult<bool>,
+    ) -> CargoResult<Vec<PathBuf>> {
+        warn!("list_files_git {}", pkg.package_id());
+        let index = repo.index()?;
+        let root = repo.workdir()
+            .ok_or_else(|| internal("Can't list files on a bare repository."))?;
+        let pkg_path = pkg.root();
+
+        let mut ret = Vec::<PathBuf>::new();
+
+        // We use information from the git repository to guide us in traversing
+        // its tree. The primary purpose of this is to take advantage of the
+        // .gitignore and auto-ignore files that don't matter.
+        //
+        // Here we're also careful to look at both tracked and untracked files as
+        // the untracked files are often part of a build and may become relevant
+        // as part of a future commit.
+        let index_files = index.iter().map(|entry| {
+            use libgit2_sys::GIT_FILEMODE_COMMIT;
+            let is_dir = entry.mode == GIT_FILEMODE_COMMIT as u32;
+            (join(root, &entry.path), Some(is_dir))
+        });
+        let mut opts = git2::StatusOptions::new();
+        opts.include_untracked(true);
+        if let Some(suffix) = util::without_prefix(pkg_path, root) {
+            opts.pathspec(suffix);
+        }
+        let statuses = repo.statuses(Some(&mut opts))?;
+        let untracked = statuses.iter().filter_map(|entry| match entry.status() {
+            git2::Status::WT_NEW => Some((join(root, entry.path_bytes()), None)),
+            _ => None,
+        });
+
+        let mut subpackages_found = Vec::new();
+
+        for (file_path, is_dir) in index_files.chain(untracked) {
+            let file_path = file_path?;
+
+            // Filter out files blatantly outside this package. This is helped a
+            // bit above via the `pathspec` function call, but we need to filter
+            // the entries in the index as well.
+            if !file_path.starts_with(pkg_path) {
+                continue;
+            }
+
+            match file_path.file_name().and_then(|s| s.to_str()) {
+                // Filter out Cargo.lock and target always, we don't want to
+                // package a lock file no one will ever read and we also avoid
+                // build artifacts
+                Some("Cargo.lock") | Some("target") => continue,
+
+                // Keep track of all sub-packages found and also strip out all
+                // matches we've found so far. Note, though, that if we find
+                // our own `Cargo.toml` we keep going.
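+                // Illustrative example: with a package root of `/repo`, hitting
+                // `/repo/examples/helper/Cargo.toml` records
+                // `/repo/examples/helper` as a sub-package and retroactively
+                // drops any files already collected under it (via the `retain`
+                // below).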
+ Some("Cargo.toml") => { + let path = file_path.parent().unwrap(); + if path != pkg_path { + warn!("subpackage found: {}", path.display()); + ret.retain(|p| !p.starts_with(path)); + subpackages_found.push(path.to_path_buf()); + continue; + } + } + + _ => {} + } + + // If this file is part of any other sub-package we've found so far, + // skip it. + if subpackages_found.iter().any(|p| file_path.starts_with(p)) { + continue; + } + + if is_dir.unwrap_or_else(|| file_path.is_dir()) { + warn!(" found submodule {}", file_path.display()); + let rel = util::without_prefix(&file_path, root).unwrap(); + let rel = rel.to_str() + .ok_or_else(|| format_err!("invalid utf-8 filename: {}", rel.display()))?; + // Git submodules are currently only named through `/` path + // separators, explicitly not `\` which windows uses. Who knew? + let rel = rel.replace(r"\", "/"); + match repo.find_submodule(&rel).and_then(|s| s.open()) { + Ok(repo) => { + let files = self.list_files_git(pkg, repo, filter)?; + ret.extend(files.into_iter()); + } + Err(..) => { + PathSource::walk(&file_path, &mut ret, false, filter)?; + } + } + } else if (*filter)(&file_path)? { + // We found a file! + warn!(" found {}", file_path.display()); + ret.push(file_path); + } + } + return Ok(ret); + + #[cfg(unix)] + fn join(path: &Path, data: &[u8]) -> CargoResult { + use std::os::unix::prelude::*; + use std::ffi::OsStr; + Ok(path.join(::from_bytes(data))) + } + #[cfg(windows)] + fn join(path: &Path, data: &[u8]) -> CargoResult { + use std::str; + match str::from_utf8(data) { + Ok(s) => Ok(path.join(s)), + Err(..) => Err(internal( + "cannot process path in git with a non \ + unicode filename", + )), + } + } + } + + fn list_files_walk( + &self, + pkg: &Package, + filter: &mut FnMut(&Path) -> CargoResult, + ) -> CargoResult> { + let mut ret = Vec::new(); + PathSource::walk(pkg.root(), &mut ret, true, filter)?; + Ok(ret) + } + + fn walk( + path: &Path, + ret: &mut Vec, + is_root: bool, + filter: &mut FnMut(&Path) -> CargoResult, + ) -> CargoResult<()> { + if !fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false) { + if (*filter)(path)? { + ret.push(path.to_path_buf()); + } + return Ok(()); + } + // Don't recurse into any sub-packages that we have + if !is_root && fs::metadata(&path.join("Cargo.toml")).is_ok() { + return Ok(()); + } + + // For package integration tests, we need to sort the paths in a deterministic order to + // be able to match stdout warnings in the same order. + // + // TODO: Drop collect and sort after transition period and dropping warning tests. 
+ // See + // and + let mut entries: Vec = fs::read_dir(path)?.map(|e| e.unwrap()).collect(); + entries.sort_by(|a, b| a.path().as_os_str().cmp(b.path().as_os_str())); + for entry in entries { + let path = entry.path(); + let name = path.file_name().and_then(|s| s.to_str()); + // Skip dotfile directories + if name.map(|s| s.starts_with('.')) == Some(true) { + continue; + } + if is_root { + // Skip cargo artifacts + match name { + Some("target") | Some("Cargo.lock") => continue, + _ => {} + } + } + PathSource::walk(&path, ret, false, filter)?; + } + Ok(()) + } +} + +impl<'cfg> Debug for PathSource<'cfg> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "the paths source") + } +} + +impl<'cfg> Source for PathSource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + for s in self.packages.iter().map(|p| p.summary()) { + if dep.matches(s) { + f(s.clone()) + } + } + Ok(()) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + false + } + + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + if !self.updated { + let packages = self.read_packages()?; + self.packages.extend(packages.into_iter()); + self.updated = true; + } + + Ok(()) + } + + fn download(&mut self, id: &PackageId) -> CargoResult { + trace!("getting packages; id={}", id); + + let pkg = self.packages.iter().find(|pkg| pkg.package_id() == id); + pkg.cloned() + .ok_or_else(|| internal(format!("failed to find {} in path source", id))) + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + if !self.updated { + return Err(internal("BUG: source was not updated")); + } + + let mut max = FileTime::zero(); + let mut max_path = PathBuf::from(""); + for file in self.list_files(pkg)? { + // An fs::stat error here is either because path is a + // broken symlink, a permissions error, or a race + // condition where this path was rm'ed - either way, + // we can ignore the error and treat the path's mtime + // as 0. + let mtime = paths::mtime(&file).unwrap_or(FileTime::zero()); + warn!("{} {}", mtime, file.display()); + if mtime > max { + max = mtime; + max_path = file; + } + } + trace!("fingerprint {}: {}", self.path.display(), max); + Ok(format!("{} ({})", max, max_path.display())) + } +} diff --git a/src/cargo/sources/registry/index.rs b/src/cargo/sources/registry/index.rs new file mode 100644 index 000000000..878def113 --- /dev/null +++ b/src/cargo/sources/registry/index.rs @@ -0,0 +1,214 @@ +use std::collections::HashMap; +use std::path::Path; +use std::str; + +use serde_json; +use semver::Version; + +use core::dependency::Dependency; +use core::{PackageId, SourceId, Summary}; +use sources::registry::{RegistryPackage, INDEX_LOCK}; +use sources::registry::RegistryData; +use util::{internal, CargoResult, Config, Filesystem}; + +pub struct RegistryIndex<'cfg> { + source_id: SourceId, + path: Filesystem, + cache: HashMap>, + hashes: HashMap>, // (name, vers) => cksum + config: &'cfg Config, + locked: bool, +} + +impl<'cfg> RegistryIndex<'cfg> { + pub fn new( + id: &SourceId, + path: &Filesystem, + config: &'cfg Config, + locked: bool, + ) -> RegistryIndex<'cfg> { + RegistryIndex { + source_id: id.clone(), + path: path.clone(), + cache: HashMap::new(), + hashes: HashMap::new(), + config, + locked, + } + } + + /// Return the hash listed for a specified PackageId. 
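+    // For orientation, examples of the sharding scheme used by
+    // `load_summaries` below: crate `u` lives at `1/u`, `at` at `2/at`,
+    // `url` at `3/u/url`, and `serde` at `se/rd/serde` in the index.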
+ pub fn hash(&mut self, pkg: &PackageId, load: &mut RegistryData) -> CargoResult { + let name = &*pkg.name(); + let version = pkg.version(); + if let Some(s) = self.hashes.get(name).and_then(|v| v.get(version)) { + return Ok(s.clone()); + } + // Ok, we're missing the key, so parse the index file to load it. + self.summaries(name, load)?; + self.hashes + .get(name) + .and_then(|v| v.get(version)) + .ok_or_else(|| internal(format!("no hash listed for {}", pkg))) + .map(|s| s.clone()) + } + + /// Parse the on-disk metadata for the package provided + /// + /// Returns a list of pairs of (summary, yanked) for the package name + /// specified. + pub fn summaries( + &mut self, + name: &str, + load: &mut RegistryData, + ) -> CargoResult<&Vec<(Summary, bool)>> { + if self.cache.contains_key(name) { + return Ok(&self.cache[name]); + } + let summaries = self.load_summaries(name, load)?; + self.cache.insert(name.to_string(), summaries); + Ok(&self.cache[name]) + } + + fn load_summaries( + &mut self, + name: &str, + load: &mut RegistryData, + ) -> CargoResult> { + let (root, _lock) = if self.locked { + let lock = self.path + .open_ro(Path::new(INDEX_LOCK), self.config, "the registry index"); + match lock { + Ok(lock) => (lock.path().parent().unwrap().to_path_buf(), Some(lock)), + Err(_) => return Ok(Vec::new()), + } + } else { + (self.path.clone().into_path_unlocked(), None) + }; + + let fs_name = name.chars() + .flat_map(|c| c.to_lowercase()) + .collect::(); + + // see module comment for why this is structured the way it is + let path = match fs_name.len() { + 1 => format!("1/{}", fs_name), + 2 => format!("2/{}", fs_name), + 3 => format!("3/{}/{}", &fs_name[..1], fs_name), + _ => format!("{}/{}/{}", &fs_name[0..2], &fs_name[2..4], fs_name), + }; + let mut ret = Vec::new(); + let mut hit_closure = false; + let err = load.load(&root, Path::new(&path), &mut |contents| { + hit_closure = true; + let contents = str::from_utf8(contents) + .map_err(|_| format_err!("registry index file was not valid utf-8"))?; + ret.reserve(contents.lines().count()); + let lines = contents.lines().map(|s| s.trim()).filter(|l| !l.is_empty()); + + let online = !self.config.cli_unstable().offline; + // Attempt forwards-compatibility on the index by ignoring + // everything that we ourselves don't understand, that should + // allow future cargo implementations to break the + // interpretation of each line here and older cargo will simply + // ignore the new lines. + ret.extend(lines.filter_map(|line| { + let (summary, locked) = match self.parse_registry_package(line) { + Ok(p) => p, + Err(e) => { + info!("failed to parse `{}` registry package: {}", name, e); + trace!("line: {}", line); + return None + } + }; + if online || load.is_crate_downloaded(summary.package_id()) { + Some((summary, locked)) + } else { + None + } + })); + + Ok(()) + }); + + // We ignore lookup failures as those are just crates which don't exist + // or we haven't updated the registry yet. If we actually ran the + // closure though then we care about those errors. + if hit_closure { + err?; + } + + Ok(ret) + } + + /// Parse a line from the registry's index file into a Summary for a + /// package. + /// + /// The returned boolean is whether or not the summary has been yanked. 
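+    // An illustrative, hand-abbreviated index line of the shape this parser
+    // expects (field names mirror the `RegistryPackage` struct):
+    //
+    //     {"name":"foo","vers":"0.1.0","deps":[],"features":{},
+    //      "cksum":"<sha256-of-crate-tarball>","yanked":false}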
+    fn parse_registry_package(&mut self, line: &str) -> CargoResult<(Summary, bool)> {
+        let RegistryPackage {
+            name,
+            vers,
+            cksum,
+            deps,
+            features,
+            yanked,
+            links,
+        } = serde_json::from_str(line)?;
+        let pkgid = PackageId::new(&name, &vers, &self.source_id)?;
+        let deps = deps.into_iter()
+            .map(|dep| dep.into_dep(&self.source_id))
+            .collect::<CargoResult<Vec<_>>>()?;
+        let summary = Summary::new(pkgid, deps, features, links, false)?;
+        let summary = summary.set_checksum(cksum.clone());
+        if self.hashes.contains_key(&name[..]) {
+            self.hashes.get_mut(&name[..]).unwrap().insert(vers, cksum);
+        } else {
+            self.hashes
+                .entry(name.into_owned())
+                .or_insert_with(HashMap::new)
+                .insert(vers, cksum);
+        }
+        Ok((summary, yanked.unwrap_or(false)))
+    }
+
+    pub fn query(
+        &mut self,
+        dep: &Dependency,
+        load: &mut RegistryData,
+        f: &mut FnMut(Summary),
+    ) -> CargoResult<()> {
+        let source_id = self.source_id.clone();
+        let summaries = self.summaries(&*dep.name(), load)?;
+        let summaries = summaries
+            .iter()
+            .filter(|&&(_, yanked)| dep.source_id().precise().is_some() || !yanked)
+            .map(|s| s.0.clone());
+
+        // Handle `cargo update --precise` here. If specified, our own source
+        // will have a precise version listed of the form
+        // `<pkg>=<p_req>-><f_req>` where `<pkg>` is the name of a crate on
+        // this source, `<p_req>` is the version installed and `<f_req>` is the
+        // version requested (argument to `--precise`).
+        let summaries = summaries.filter(|s| match source_id.precise() {
+            Some(p) if p.starts_with(&*dep.name()) && p[dep.name().len()..].starts_with('=') => {
+                let mut vers = p[dep.name().len() + 1..].splitn(2, "->");
+                if dep.version_req()
+                    .matches(&Version::parse(vers.next().unwrap()).unwrap())
+                {
+                    vers.next().unwrap() == s.version().to_string()
+                } else {
+                    true
+                }
+            }
+            _ => true,
+        });
+
+        for summary in summaries {
+            if dep.matches(&summary) {
+                f(summary);
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/src/cargo/sources/registry/local.rs b/src/cargo/sources/registry/local.rs
new file mode 100644
index 000000000..fa97c42a2
--- /dev/null
+++ b/src/cargo/sources/registry/local.rs
@@ -0,0 +1,103 @@
+use std::io::SeekFrom;
+use std::io::prelude::*;
+use std::path::Path;
+
+use core::PackageId;
+use hex;
+use sources::registry::{RegistryConfig, RegistryData};
+use util::FileLock;
+use util::paths;
+use util::{Config, Filesystem, Sha256};
+use util::errors::{CargoResult, CargoResultExt};
+
+pub struct LocalRegistry<'cfg> {
+    index_path: Filesystem,
+    root: Filesystem,
+    src_path: Filesystem,
+    config: &'cfg Config,
+}
+
+impl<'cfg> LocalRegistry<'cfg> {
+    pub fn new(root: &Path, config: &'cfg Config, name: &str) -> LocalRegistry<'cfg> {
+        LocalRegistry {
+            src_path: config.registry_source_path().join(name),
+            index_path: Filesystem::new(root.join("index")),
+            root: Filesystem::new(root.to_path_buf()),
+            config,
+        }
+    }
+}
+
+impl<'cfg> RegistryData for LocalRegistry<'cfg> {
+    fn index_path(&self) -> &Filesystem {
+        &self.index_path
+    }
+
+    fn load(
+        &self,
+        root: &Path,
+        path: &Path,
+        data: &mut FnMut(&[u8]) -> CargoResult<()>,
+    ) -> CargoResult<()> {
+        data(&paths::read_bytes(&root.join(path))?)
+    }
+
+    fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
+        // Local registries don't have configuration for remote APIs or anything
+        // like that
+        Ok(None)
+    }
+
+    fn update_index(&mut self) -> CargoResult<()> {
+        // Nothing to update, we just use what's on disk. Verify it actually
+        // exists though. We don't use any locks as we're just checking whether
+        // these directories exist.
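+        // The on-disk shape being validated here looks like (illustrative):
+        //
+        //     <registry root>/
+        //         index/              # the registry index (must be a directory)
+        //         foo-0.1.0.crate     # crate tarballs sit next to `index/`
+        //
+        // (`download` below opens the tarballs from `self.root`)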
+ let root = self.root.clone().into_path_unlocked(); + if !root.is_dir() { + bail!("local registry path is not a directory: {}", root.display()) + } + let index_path = self.index_path.clone().into_path_unlocked(); + if !index_path.is_dir() { + bail!( + "local registry index path is not a directory: {}", + index_path.display() + ) + } + Ok(()) + } + + fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult { + let crate_file = format!("{}-{}.crate", pkg.name(), pkg.version()); + let mut crate_file = self.root.open_ro(&crate_file, self.config, "crate file")?; + + // If we've already got an unpacked version of this crate, then skip the + // checksum below as it is in theory already verified. + let dst = format!("{}-{}", pkg.name(), pkg.version()); + if self.src_path.join(dst).into_path_unlocked().exists() { + return Ok(crate_file); + } + + self.config.shell().status("Unpacking", pkg)?; + + // We don't actually need to download anything per-se, we just need to + // verify the checksum matches the .crate file itself. + let mut state = Sha256::new(); + let mut buf = [0; 64 * 1024]; + loop { + let n = crate_file + .read(&mut buf) + .chain_err(|| format!("failed to read `{}`", crate_file.path().display()))?; + if n == 0 { + break; + } + state.update(&buf[..n]); + } + if hex::encode(state.finish()) != checksum { + bail!("failed to verify the checksum of `{}`", pkg) + } + + crate_file.seek(SeekFrom::Start(0))?; + + Ok(crate_file) + } +} diff --git a/src/cargo/sources/registry/mod.rs b/src/cargo/sources/registry/mod.rs new file mode 100644 index 000000000..2e6f63228 --- /dev/null +++ b/src/cargo/sources/registry/mod.rs @@ -0,0 +1,498 @@ +//! A `Source` for registry-based packages. +//! +//! # What's a Registry? +//! +//! Registries are central locations where packages can be uploaded to, +//! discovered, and searched for. The purpose of a registry is to have a +//! location that serves as permanent storage for versions of a crate over time. +//! +//! Compared to git sources, a registry provides many packages as well as many +//! versions simultaneously. Git sources can also have commits deleted through +//! rebasings where registries cannot have their versions deleted. +//! +//! # The Index of a Registry +//! +//! One of the major difficulties with a registry is that hosting so many +//! packages may quickly run into performance problems when dealing with +//! dependency graphs. It's infeasible for cargo to download the entire contents +//! of the registry just to resolve one package's dependencies, for example. As +//! a result, cargo needs some efficient method of querying what packages are +//! available on a registry, what versions are available, and what the +//! dependencies for each version is. +//! +//! One method of doing so would be having the registry expose an HTTP endpoint +//! which can be queried with a list of packages and a response of their +//! dependencies and versions is returned. This is somewhat inefficient however +//! as we may have to hit the endpoint many times and we may have already +//! queried for much of the data locally already (for other packages, for +//! example). This also involves inventing a transport format between the +//! registry and Cargo itself, so this route was not taken. +//! +//! Instead, Cargo communicates with registries through a git repository +//! referred to as the Index. The Index of a registry is essentially an easily +//! query-able version of the registry's database for a list of versions of a +//! 
package as well as a list of dependencies for each version. +//! +//! Using git to host this index provides a number of benefits: +//! +//! * The entire index can be stored efficiently locally on disk. This means +//! that all queries of a registry can happen locally and don't need to touch +//! the network. +//! +//! * Updates of the index are quite efficient. Using git buys incremental +//! updates, compressed transmission, etc for free. The index must be updated +//! each time we need fresh information from a registry, but this is one +//! update of a git repository that probably hasn't changed a whole lot so +//! it shouldn't be too expensive. +//! +//! Additionally, each modification to the index is just appending a line at +//! the end of a file (the exact format is described later). This means that +//! the commits for an index are quite small and easily applied/compressable. +//! +//! ## The format of the Index +//! +//! The index is a store for the list of versions for all packages known, so its +//! format on disk is optimized slightly to ensure that `ls registry` doesn't +//! produce a list of all packages ever known. The index also wants to ensure +//! that there's not a million files which may actually end up hitting +//! filesystem limits at some point. To this end, a few decisions were made +//! about the format of the registry: +//! +//! 1. Each crate will have one file corresponding to it. Each version for a +//! crate will just be a line in this file. +//! 2. There will be two tiers of directories for crate names, under which +//! crates corresponding to those tiers will be located. +//! +//! As an example, this is an example hierarchy of an index: +//! +//! ```notrust +//! . +//! ├── 3 +//! │   └── u +//! │   └── url +//! ├── bz +//! │   └── ip +//! │   └── bzip2 +//! ├── config.json +//! ├── en +//! │   └── co +//! │   └── encoding +//! └── li +//!    ├── bg +//!    │   └── libgit2 +//!    └── nk +//!    └── link-config +//! ``` +//! +//! The root of the index contains a `config.json` file with a few entries +//! corresponding to the registry (see `RegistryConfig` below). +//! +//! Otherwise, there are three numbered directories (1, 2, 3) for crates with +//! names 1, 2, and 3 characters in length. The 1/2 directories simply have the +//! crate files underneath them, while the 3 directory is sharded by the first +//! letter of the crate name. +//! +//! Otherwise the top-level directory contains many two-letter directory names, +//! each of which has many sub-folders with two letters. At the end of all these +//! are the actual crate files themselves. +//! +//! The purpose of this layout is to hopefully cut down on `ls` sizes as well as +//! efficient lookup based on the crate name itself. +//! +//! ## Crate files +//! +//! Each file in the index is the history of one crate over time. Each line in +//! the file corresponds to one version of a crate, stored in JSON format (see +//! the `RegistryPackage` structure below). +//! +//! As new versions are published, new lines are appended to this file. The only +//! modifications to this file that should happen over time are yanks of a +//! particular version. +//! +//! # Downloading Packages +//! +//! The purpose of the Index was to provide an efficient method to resolve the +//! dependency graph for a package. So far we only required one network +//! interaction to update the registry's repository (yay!). After resolution has +//! been performed, however we need to download the contents of packages so we +//! 
can read the full manifest and build the source code.
+//!
+//! To accomplish this, this source's `download` method will make an HTTP
+//! request per-package requested to download tarballs into a local cache. These
+//! tarballs will then be unpacked into a destination folder.
+//!
+//! Note that because versions uploaded to the registry are frozen forever, the
+//! HTTP download and unpacking can all be skipped if the version has already
+//! been downloaded and unpacked. This caching allows us to only download a
+//! package when absolutely necessary.
+//!
+//! # Filesystem Hierarchy
+//!
+//! Overall, the `$HOME/.cargo` directory looks like this when talking about
+//! the registry:
+//!
+//! ```notrust
+//! # A folder under which all registry metadata is hosted (similar to
+//! # $HOME/.cargo/git)
+//! $HOME/.cargo/registry/
+//!
+//!     # For each registry that cargo knows about (keyed by hostname + hash)
+//!     # there is a folder which is the checked out version of the index for
+//!     # the registry in this location. Note that this is done so cargo can
+//!     # support multiple registries simultaneously
+//!     index/
+//!         registry1-<hash>/
+//!         registry2-<hash>/
+//!         ...
+//!
+//!     # This folder is a cache for all downloaded tarballs from a registry.
+//!     # Once downloaded and verified, a tarball never changes.
+//!     cache/
+//!         registry1-<hash>/<pkg>-<version>.crate
+//!         ...
+//!
+//!     # Location in which all tarballs are unpacked. Each tarball is known to
+//!     # be frozen after downloading, so transitively this folder is also
+//!     # frozen once it's unpacked (it's never unpacked again)
+//!     src/
+//!         registry1-<hash>/<pkg>-<version>/...
+//!         ...
+//! ```
+
+use std::borrow::Cow;
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use flate2::read::GzDecoder;
+use semver::Version;
+use tar::Archive;
+
+use core::{Package, PackageId, Source, SourceId, Summary};
+use core::dependency::{Dependency, Kind};
+use sources::PathSource;
+use util::{internal, CargoResult, Config, FileLock, Filesystem};
+use util::errors::CargoResultExt;
+use util::hex;
+use util::to_url::ToUrl;
+
+const INDEX_LOCK: &str = ".cargo-index-lock";
+pub const CRATES_IO: &str = "https://github.com/rust-lang/crates.io-index";
+const CRATE_TEMPLATE: &str = "{crate}";
+const VERSION_TEMPLATE: &str = "{version}";
+
+pub struct RegistrySource<'cfg> {
+    source_id: SourceId,
+    src_path: Filesystem,
+    config: &'cfg Config,
+    updated: bool,
+    ops: Box<RegistryData + 'cfg>,
+    index: index::RegistryIndex<'cfg>,
+    index_locked: bool,
+}
+
+#[derive(Deserialize)]
+pub struct RegistryConfig {
+    /// Download endpoint for all crates.
+    ///
+    /// The string is a template which will generate the download URL for the
+    /// tarball of a specific version of a crate. The substrings `{crate}` and
+    /// `{version}` will be replaced with the crate's name and version
+    /// respectively.
+    ///
+    /// For backwards compatibility, if the string does not contain `{crate}` or
+    /// `{version}`, it will be extended with `/{crate}/{version}/download` to
+    /// support registries like crates.io which were created before the
+    /// templating setup was introduced.
+    pub dl: String,
+
+    /// API endpoint for the registry. This is what's actually hit to perform
+    /// operations like yanks, owner modifications, publish new crates, etc.
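+    // An illustrative `config.json` at the root of an index; the hypothetical
+    // `dl` value below contains neither `{crate}` nor `{version}`, so it would
+    // be extended with `/{crate}/{version}/download`:
+    //
+    //     {
+    //         "dl": "https://crates.example.com/api/v1/crates",
+    //         "api": "https://crates.example.com"
+    //     }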
+    pub api: Option<String>,
+}
+
+#[derive(Deserialize)]
+pub struct RegistryPackage<'a> {
+    name: Cow<'a, str>,
+    vers: Version,
+    deps: Vec<RegistryDependency<'a>>,
+    features: BTreeMap<String, Vec<String>>,
+    cksum: String,
+    yanked: Option<bool>,
+    links: Option<String>,
+}
+
+#[derive(Deserialize)]
+#[serde(field_identifier, rename_all = "lowercase")]
+enum Field {
+    Name,
+    Vers,
+    Deps,
+    Features,
+    Cksum,
+    Yanked,
+    Links,
+}
+
+#[derive(Deserialize)]
+struct RegistryDependency<'a> {
+    name: Cow<'a, str>,
+    req: Cow<'a, str>,
+    features: Vec<String>,
+    optional: bool,
+    default_features: bool,
+    target: Option<Cow<'a, str>>,
+    kind: Option<Cow<'a, str>>,
+    registry: Option<String>,
+}
+
+impl<'a> RegistryDependency<'a> {
+    /// Converts an encoded dependency in the registry to a cargo dependency
+    pub fn into_dep(self, default: &SourceId) -> CargoResult<Dependency> {
+        let RegistryDependency {
+            name,
+            req,
+            mut features,
+            optional,
+            default_features,
+            target,
+            kind,
+            registry,
+        } = self;
+
+        let id = if let Some(registry) = registry {
+            SourceId::for_registry(&registry.to_url()?)?
+        } else {
+            default.clone()
+        };
+
+        let mut dep = Dependency::parse_no_deprecated(&name, Some(&req), &id)?;
+        let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") {
+            "dev" => Kind::Development,
+            "build" => Kind::Build,
+            _ => Kind::Normal,
+        };
+
+        let platform = match target {
+            Some(target) => Some(target.parse()?),
+            None => None,
+        };
+
+        // Unfortunately older versions of cargo and/or the registry ended up
+        // publishing lots of entries where the features array contained the
+        // empty feature, "", inside. This confuses the resolution process much
+        // later on and these features aren't actually valid, so filter them all
+        // out here.
+        features.retain(|s| !s.is_empty());
+
+        dep.set_optional(optional)
+            .set_default_features(default_features)
+            .set_features(features)
+            .set_platform(platform)
+            .set_kind(kind);
+
+        Ok(dep)
+    }
+}
+
+pub trait RegistryData {
+    fn index_path(&self) -> &Filesystem;
+    fn load(
+        &self,
+        _root: &Path,
+        path: &Path,
+        data: &mut FnMut(&[u8]) -> CargoResult<()>,
+    ) -> CargoResult<()>;
+    fn config(&mut self) -> CargoResult<Option<RegistryConfig>>;
+    fn update_index(&mut self) -> CargoResult<()>;
+    fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult<FileLock>;
+
+    fn is_crate_downloaded(&self, _pkg: &PackageId) -> bool {
+        true
+    }
+}
+
+mod index;
+mod remote;
+mod local;
+
+fn short_name(id: &SourceId) -> String {
+    let hash = hex::short_hash(id);
+    let ident = id.url().host_str().unwrap_or("").to_string();
+    format!("{}-{}", ident, hash)
+}
+
+impl<'cfg> RegistrySource<'cfg> {
+    pub fn remote(source_id: &SourceId, config: &'cfg Config) -> RegistrySource<'cfg> {
+        let name = short_name(source_id);
+        let ops = remote::RemoteRegistry::new(source_id, config, &name);
+        RegistrySource::new(source_id, config, &name, Box::new(ops), true)
+    }
+
+    pub fn local(source_id: &SourceId, path: &Path, config: &'cfg Config) -> RegistrySource<'cfg> {
+        let name = short_name(source_id);
+        let ops = local::LocalRegistry::new(path, config, &name);
+        RegistrySource::new(source_id, config, &name, Box::new(ops), false)
+    }
+
+    fn new(
+        source_id: &SourceId,
+        config: &'cfg Config,
+        name: &str,
+        ops: Box<RegistryData + 'cfg>,
+        index_locked: bool,
+    ) -> RegistrySource<'cfg> {
+        RegistrySource {
+            src_path: config.registry_source_path().join(name),
+            config,
+            source_id: source_id.clone(),
+            updated: false,
+            index: index::RegistryIndex::new(source_id, ops.index_path(), config, index_locked),
+            index_locked,
+            ops,
+        }
+    }
+
+    /// Decode the configuration stored within the registry.
+ /// + /// This requires that the index has been at least checked out. + pub fn config(&mut self) -> CargoResult> { + self.ops.config() + } + + /// Unpacks a downloaded package into a location where it's ready to be + /// compiled. + /// + /// No action is taken if the source looks like it's already unpacked. + fn unpack_package(&self, pkg: &PackageId, tarball: &FileLock) -> CargoResult { + let dst = self.src_path + .join(&format!("{}-{}", pkg.name(), pkg.version())); + dst.create_dir()?; + // Note that we've already got the `tarball` locked above, and that + // implies a lock on the unpacked destination as well, so this access + // via `into_path_unlocked` should be ok. + let dst = dst.into_path_unlocked(); + let ok = dst.join(".cargo-ok"); + if ok.exists() { + return Ok(dst); + } + + let gz = GzDecoder::new(tarball.file()); + let mut tar = Archive::new(gz); + let prefix = dst.file_name().unwrap(); + let parent = dst.parent().unwrap(); + for entry in tar.entries()? { + let mut entry = entry.chain_err(|| "failed to iterate over archive")?; + let entry_path = entry + .path() + .chain_err(|| "failed to read entry path")? + .into_owned(); + + // We're going to unpack this tarball into the global source + // directory, but we want to make sure that it doesn't accidentally + // (or maliciously) overwrite source code from other crates. Cargo + // itself should never generate a tarball that hits this error, and + // crates.io should also block uploads with these sorts of tarballs, + // but be extra sure by adding a check here as well. + if !entry_path.starts_with(prefix) { + bail!( + "invalid tarball downloaded, contains \ + a file at {:?} which isn't under {:?}", + entry_path, + prefix + ) + } + + // Once that's verified, unpack the entry as usual. + entry + .unpack_in(parent) + .chain_err(|| format!("failed to unpack entry at `{}`", entry_path.display()))?; + } + File::create(&ok)?; + Ok(dst.clone()) + } + + fn do_update(&mut self) -> CargoResult<()> { + self.ops.update_index()?; + let path = self.ops.index_path(); + self.index = + index::RegistryIndex::new(&self.source_id, path, self.config, self.index_locked); + Ok(()) + } +} + +impl<'cfg> Source for RegistrySource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + // If this is a precise dependency, then it came from a lockfile and in + // theory the registry is known to contain this version. If, however, we + // come back with no summaries, then our registry may need to be + // updated, so we fall back to performing a lazy update. + if dep.source_id().precise().is_some() && !self.updated { + let mut called = false; + self.index.query(dep, &mut *self.ops, &mut |s| { + called = true; + f(s); + })?; + if called { + return Ok(()); + } else { + self.do_update()?; + } + } + + self.index.query(dep, &mut *self.ops, f) + } + + fn supports_checksums(&self) -> bool { + true + } + + fn requires_precise(&self) -> bool { + false + } + + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + // If we have an imprecise version then we don't know what we're going + // to look for, so we always attempt to perform an update here. + // + // If we have a precise version, then we'll update lazily during the + // querying phase. 
Note that precise in this case is only + // `Some("locked")` as other `Some` values indicate a `cargo update + // --precise` request + if self.source_id.precise() != Some("locked") { + self.do_update()?; + } + Ok(()) + } + + fn download(&mut self, package: &PackageId) -> CargoResult { + let hash = self.index.hash(package, &mut *self.ops)?; + let path = self.ops.download(package, &hash)?; + let path = self.unpack_package(package, &path) + .chain_err(|| internal(format!("failed to unpack package `{}`", package)))?; + let mut src = PathSource::new(&path, &self.source_id, self.config); + src.update()?; + let pkg = src.download(package)?; + + // Unfortunately the index and the actual Cargo.toml in the index can + // differ due to historical Cargo bugs. To paper over these we trash the + // *summary* loaded from the Cargo.toml we just downloaded with the one + // we loaded from the index. + let summaries = self.index.summaries(&*package.name(), &mut *self.ops)?; + let summary = summaries + .iter() + .map(|s| &s.0) + .find(|s| s.package_id() == package) + .expect("summary not found"); + let mut manifest = pkg.manifest().clone(); + manifest.set_summary(summary.clone()); + Ok(Package::new(manifest, pkg.manifest_path())) + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + Ok(pkg.package_id().version().to_string()) + } +} diff --git a/src/cargo/sources/registry/remote.rs b/src/cargo/sources/registry/remote.rs new file mode 100644 index 000000000..3d50eda9c --- /dev/null +++ b/src/cargo/sources/registry/remote.rs @@ -0,0 +1,289 @@ +use std::cell::{Cell, Ref, RefCell}; +use std::fmt::Write as FmtWrite; +use std::io::SeekFrom; +use std::io::prelude::*; +use std::mem; +use std::path::Path; +use std::str; + +use git2; +use hex; +use serde_json; +use lazycell::LazyCell; + +use core::{PackageId, SourceId}; +use sources::git; +use sources::registry::{RegistryConfig, RegistryData, CRATE_TEMPLATE, INDEX_LOCK, VERSION_TEMPLATE}; +use util::network; +use util::{FileLock, Filesystem}; +use util::{Config, Progress, Sha256, ToUrl}; +use util::errors::{CargoResult, CargoResultExt, HttpNot200}; + +pub struct RemoteRegistry<'cfg> { + index_path: Filesystem, + cache_path: Filesystem, + source_id: SourceId, + config: &'cfg Config, + tree: RefCell>>, + repo: LazyCell, + head: Cell>, +} + +impl<'cfg> RemoteRegistry<'cfg> { + pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str) -> RemoteRegistry<'cfg> { + RemoteRegistry { + index_path: config.registry_index_path().join(name), + cache_path: config.registry_cache_path().join(name), + source_id: source_id.clone(), + config, + tree: RefCell::new(None), + repo: LazyCell::new(), + head: Cell::new(None), + } + } + + fn repo(&self) -> CargoResult<&git2::Repository> { + self.repo.try_borrow_with(|| { + let path = self.index_path.clone().into_path_unlocked(); + + // Fast path without a lock + if let Ok(repo) = git2::Repository::open(&path) { + return Ok(repo); + } + + // Ok, now we need to lock and try the whole thing over again. + let lock = + self.index_path + .open_rw(Path::new(INDEX_LOCK), self.config, "the registry index")?; + match git2::Repository::open(&path) { + Ok(repo) => Ok(repo), + Err(_) => { + let _ = lock.remove_siblings(); + + // Note that we'd actually prefer to use a bare repository + // here as we're not actually going to check anything out. 
+ // All versions of Cargo, though, share the same CARGO_HOME, + // so for compatibility with older Cargo which *does* do + // checkouts we make sure to initialize a new full + // repository (not a bare one). + // + // We should change this to `init_bare` whenever we feel + // like enough time has passed or if we change the directory + // that the folder is located in, such as by changing the + // hash at the end of the directory. + Ok(git2::Repository::init(&path)?) + } + } + }) + } + + fn head(&self) -> CargoResult { + if self.head.get().is_none() { + let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?; + self.head.set(Some(oid)); + } + Ok(self.head.get().unwrap()) + } + + fn tree(&self) -> CargoResult> { + { + let tree = self.tree.borrow(); + if tree.is_some() { + return Ok(Ref::map(tree, |s| s.as_ref().unwrap())); + } + } + let repo = self.repo()?; + let commit = repo.find_commit(self.head()?)?; + let tree = commit.tree()?; + + // Unfortunately in libgit2 the tree objects look like they've got a + // reference to the repository object which means that a tree cannot + // outlive the repository that it came from. Here we want to cache this + // tree, though, so to accomplish this we transmute it to a static + // lifetime. + // + // Note that we don't actually hand out the static lifetime, instead we + // only return a scoped one from this function. Additionally the repo + // we loaded from (above) lives as long as this object + // (`RemoteRegistry`) so we then just need to ensure that the tree is + // destroyed first in the destructor, hence the destructor on + // `RemoteRegistry` below. + let tree = unsafe { mem::transmute::>(tree) }; + *self.tree.borrow_mut() = Some(tree); + Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap())) + } +} + +impl<'cfg> RegistryData for RemoteRegistry<'cfg> { + fn index_path(&self) -> &Filesystem { + &self.index_path + } + + fn load( + &self, + _root: &Path, + path: &Path, + data: &mut FnMut(&[u8]) -> CargoResult<()>, + ) -> CargoResult<()> { + // Note that the index calls this method and the filesystem is locked + // in the index, so we don't need to worry about an `update_index` + // happening in a different process. + let repo = self.repo()?; + let tree = self.tree()?; + let entry = tree.get_path(path)?; + let object = entry.to_object(repo)?; + let blob = match object.as_blob() { + Some(blob) => blob, + None => bail!("path `{}` is not a blob in the git repo", path.display()), + }; + data(blob.content()) + } + + fn config(&mut self) -> CargoResult> { + self.repo()?; // create intermediate dirs and initialize the repo + let _lock = + self.index_path + .open_ro(Path::new(INDEX_LOCK), self.config, "the registry index")?; + let mut config = None; + self.load(Path::new(""), Path::new("config.json"), &mut |json| { + config = Some(serde_json::from_slice(json)?); + Ok(()) + })?; + Ok(config) + } + + fn update_index(&mut self) -> CargoResult<()> { + if self.config.cli_unstable().offline { + return Ok(()); + } + if self.config.cli_unstable().no_index_update { + return Ok(()); + } + + // Ensure that we'll actually be able to acquire an HTTP handle later on + // once we start trying to download crates. This will weed out any + // problems with `.cargo/config` configuration related to HTTP. + // + // This way if there's a problem the error gets printed before we even + // hit the index, which may not actually read this configuration. 
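+        // Illustrative failure mode: a `.cargo/config` whose `[http]` section
+        // carries an invalid value (say, an unparseable `proxy` entry) errors
+        // out of the `http()` call below, before any index I/O happens.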
+ self.config.http()?; + + self.repo()?; + self.head.set(None); + *self.tree.borrow_mut() = None; + let _lock = + self.index_path + .open_rw(Path::new(INDEX_LOCK), self.config, "the registry index")?; + self.config + .shell() + .status("Updating", self.source_id.display_registry())?; + + // git fetch origin master + let url = self.source_id.url(); + let refspec = "refs/heads/master:refs/remotes/origin/master"; + let repo = self.repo.borrow_mut().unwrap(); + git::fetch(repo, url, refspec, self.config) + .chain_err(|| format!("failed to fetch `{}`", url))?; + Ok(()) + } + + fn download(&mut self, pkg: &PackageId, checksum: &str) -> CargoResult { + let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); + let path = Path::new(&filename); + + // Attempt to open an read-only copy first to avoid an exclusive write + // lock and also work with read-only filesystems. Note that we check the + // length of the file like below to handle interrupted downloads. + // + // If this fails then we fall through to the exclusive path where we may + // have to redownload the file. + if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) { + let meta = dst.file().metadata()?; + if meta.len() > 0 { + return Ok(dst); + } + } + let mut dst = self.cache_path.open_rw(path, self.config, &filename)?; + let meta = dst.file().metadata()?; + if meta.len() > 0 { + return Ok(dst); + } + self.config.shell().status("Downloading", pkg)?; + + let config = self.config()?.unwrap(); + let mut url = config.dl.clone(); + if !url.contains(CRATE_TEMPLATE) && !url.contains(VERSION_TEMPLATE) { + write!(url, "/{}/{}/download", CRATE_TEMPLATE, VERSION_TEMPLATE).unwrap(); + } + let url = url.replace(CRATE_TEMPLATE, &*pkg.name()) + .replace(VERSION_TEMPLATE, &pkg.version().to_string()) + .to_url()?; + + // TODO: don't download into memory, but ensure that if we ctrl-c a + // download we should resume either from the start or the middle + // on the next time + let url = url.to_string(); + let mut handle = self.config.http()?.borrow_mut(); + handle.get(true)?; + handle.url(&url)?; + handle.follow_location(true)?; + let mut state = Sha256::new(); + let mut body = Vec::new(); + network::with_retry(self.config, || { + state = Sha256::new(); + body = Vec::new(); + let mut pb = Progress::new("Fetch", self.config); + { + handle.progress(true)?; + let mut handle = handle.transfer(); + handle.progress_function(|dl_total, dl_cur, _, _| { + pb.tick(dl_cur as usize, dl_total as usize).is_ok() + })?; + handle.write_function(|buf| { + state.update(buf); + body.extend_from_slice(buf); + Ok(buf.len()) + })?; + handle.perform()?; + } + let code = handle.response_code()?; + if code != 200 && code != 0 { + let url = handle.effective_url()?.unwrap_or(&url); + Err(HttpNot200 { + code, + url: url.to_string(), + }.into()) + } else { + Ok(()) + } + })?; + + // Verify what we just downloaded + if hex::encode(state.finish()) != checksum { + bail!("failed to verify the checksum of `{}`", pkg) + } + + dst.write_all(&body)?; + dst.seek(SeekFrom::Start(0))?; + Ok(dst) + } + + fn is_crate_downloaded(&self, pkg: &PackageId) -> bool { + let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); + let path = Path::new(&filename); + + if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) { + if let Ok(meta) = dst.file().metadata() { + return meta.len() > 0; + } + } + false + } +} + +impl<'cfg> Drop for RemoteRegistry<'cfg> { + fn drop(&mut self) { + // Just be sure to drop this before our other fields + 
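+        // `tree` was transmuted to a `'static` lifetime in `tree()` but really
+        // borrows from `repo`, so it must be cleared while `repo` is still
+        // alive.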
self.tree.borrow_mut().take(); + } +} diff --git a/src/cargo/sources/replaced.rs b/src/cargo/sources/replaced.rs new file mode 100644 index 000000000..16d867c17 --- /dev/null +++ b/src/cargo/sources/replaced.rs @@ -0,0 +1,73 @@ +use core::{Dependency, Package, PackageId, Source, SourceId, Summary}; +use util::errors::{CargoResult, CargoResultExt}; + +pub struct ReplacedSource<'cfg> { + to_replace: SourceId, + replace_with: SourceId, + inner: Box, +} + +impl<'cfg> ReplacedSource<'cfg> { + pub fn new( + to_replace: &SourceId, + replace_with: &SourceId, + src: Box, + ) -> ReplacedSource<'cfg> { + ReplacedSource { + to_replace: to_replace.clone(), + replace_with: replace_with.clone(), + inner: src, + } + } +} + +impl<'cfg> Source for ReplacedSource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> { + let (replace_with, to_replace) = (&self.replace_with, &self.to_replace); + let dep = dep.clone().map_source(to_replace, replace_with); + + self.inner + .query( + &dep, + &mut |summary| f(summary.map_source(replace_with, to_replace)), + ) + .chain_err(|| format!("failed to query replaced source {}", self.to_replace))?; + Ok(()) + } + + fn supports_checksums(&self) -> bool { + self.inner.supports_checksums() + } + + fn requires_precise(&self) -> bool { + self.inner.requires_precise() + } + + fn source_id(&self) -> &SourceId { + &self.to_replace + } + + fn update(&mut self) -> CargoResult<()> { + self.inner + .update() + .chain_err(|| format!("failed to update replaced source {}", self.to_replace))?; + Ok(()) + } + + fn download(&mut self, id: &PackageId) -> CargoResult { + let id = id.with_source_id(&self.replace_with); + let pkg = self.inner + .download(&id) + .chain_err(|| format!("failed to download replaced source {}", self.to_replace))?; + Ok(pkg.map_source(&self.replace_with, &self.to_replace)) + } + + fn fingerprint(&self, id: &Package) -> CargoResult { + self.inner.fingerprint(id) + } + + fn verify(&self, id: &PackageId) -> CargoResult<()> { + let id = id.with_source_id(&self.replace_with); + self.inner.verify(&id) + } +} diff --git a/src/cargo/util/cfg.rs b/src/cargo/util/cfg.rs new file mode 100644 index 000000000..03de8444f --- /dev/null +++ b/src/cargo/util/cfg.rs @@ -0,0 +1,263 @@ +use std::str::{self, FromStr}; +use std::iter; +use std::fmt; + +use util::{CargoError, CargoResult}; + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum Cfg { + Name(String), + KeyPair(String, String), +} + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum CfgExpr { + Not(Box), + All(Vec), + Any(Vec), + Value(Cfg), +} + +#[derive(PartialEq)] +enum Token<'a> { + LeftParen, + RightParen, + Ident(&'a str), + Comma, + Equals, + String(&'a str), +} + +struct Tokenizer<'a> { + s: iter::Peekable>, + orig: &'a str, +} + +struct Parser<'a> { + t: iter::Peekable>, +} + +impl FromStr for Cfg { + type Err = CargoError; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.cfg()?; + if p.t.next().is_some() { + bail!("malformed cfg value or key/value pair: `{}`", s) + } + Ok(e) + } +} + +impl fmt::Display for Cfg { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Cfg::Name(ref s) => s.fmt(f), + Cfg::KeyPair(ref k, ref v) => write!(f, "{} = \"{}\"", k, v), + } + } +} + +impl CfgExpr { + pub fn matches(&self, cfg: &[Cfg]) -> bool { + match *self { + CfgExpr::Not(ref e) => !e.matches(cfg), + CfgExpr::All(ref e) => e.iter().all(|e| e.matches(cfg)), + CfgExpr::Any(ref e) => 
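+            // e.g. (illustrative) `any(unix, target_os = "redox")` matches a
+            // cfg list containing `Cfg::Name("unix")`: one operand suffices.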
e.iter().any(|e| e.matches(cfg)), + CfgExpr::Value(ref e) => cfg.contains(e), + } + } +} + +impl FromStr for CfgExpr { + type Err = CargoError; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.expr()?; + if p.t.next().is_some() { + bail!( + "can only have one cfg-expression, consider using all() or \ + any() explicitly" + ) + } + Ok(e) + } +} + +impl fmt::Display for CfgExpr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CfgExpr::Not(ref e) => write!(f, "not({})", e), + CfgExpr::All(ref e) => write!(f, "all({})", CommaSep(e)), + CfgExpr::Any(ref e) => write!(f, "any({})", CommaSep(e)), + CfgExpr::Value(ref e) => write!(f, "{}", e), + } + } +} + +struct CommaSep<'a, T: 'a>(&'a [T]); + +impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (i, v) in self.0.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", v)?; + } + Ok(()) + } +} + +impl<'a> Parser<'a> { + fn new(s: &'a str) -> Parser<'a> { + Parser { + t: Tokenizer { + s: s.char_indices().peekable(), + orig: s, + }.peekable(), + } + } + + fn expr(&mut self) -> CargoResult { + match self.t.peek() { + Some(&Ok(Token::Ident(op @ "all"))) | Some(&Ok(Token::Ident(op @ "any"))) => { + self.t.next(); + let mut e = Vec::new(); + self.eat(Token::LeftParen)?; + while !self.try(Token::RightParen) { + e.push(self.expr()?); + if !self.try(Token::Comma) { + self.eat(Token::RightParen)?; + break; + } + } + if op == "all" { + Ok(CfgExpr::All(e)) + } else { + Ok(CfgExpr::Any(e)) + } + } + Some(&Ok(Token::Ident("not"))) => { + self.t.next(); + self.eat(Token::LeftParen)?; + let e = self.expr()?; + self.eat(Token::RightParen)?; + Ok(CfgExpr::Not(Box::new(e))) + } + Some(&Ok(..)) => self.cfg().map(CfgExpr::Value), + Some(&Err(..)) => Err(self.t.next().unwrap().err().unwrap()), + None => bail!( + "expected start of a cfg expression, \ + found nothing" + ), + } + } + + fn cfg(&mut self) -> CargoResult { + match self.t.next() { + Some(Ok(Token::Ident(name))) => { + let e = if self.try(Token::Equals) { + let val = match self.t.next() { + Some(Ok(Token::String(s))) => s, + Some(Ok(t)) => bail!("expected a string, found {}", t.classify()), + Some(Err(e)) => return Err(e), + None => bail!("expected a string, found nothing"), + }; + Cfg::KeyPair(name.to_string(), val.to_string()) + } else { + Cfg::Name(name.to_string()) + }; + Ok(e) + } + Some(Ok(t)) => bail!("expected identifier, found {}", t.classify()), + Some(Err(e)) => Err(e), + None => bail!("expected identifier, found nothing"), + } + } + + fn try(&mut self, token: Token<'a>) -> bool { + match self.t.peek() { + Some(&Ok(ref t)) if token == *t => {} + _ => return false, + } + self.t.next(); + true + } + + fn eat(&mut self, token: Token<'a>) -> CargoResult<()> { + match self.t.next() { + Some(Ok(ref t)) if token == *t => Ok(()), + Some(Ok(t)) => bail!("expected {}, found {}", token.classify(), t.classify()), + Some(Err(e)) => Err(e), + None => bail!("expected {}, but cfg expr ended", token.classify()), + } + } +} + +impl<'a> Iterator for Tokenizer<'a> { + type Item = CargoResult>; + + fn next(&mut self) -> Option>> { + loop { + match self.s.next() { + Some((_, ' ')) => {} + Some((_, '(')) => return Some(Ok(Token::LeftParen)), + Some((_, ')')) => return Some(Ok(Token::RightParen)), + Some((_, ',')) => return Some(Ok(Token::Comma)), + Some((_, '=')) => return Some(Ok(Token::Equals)), + Some((start, '"')) => { + while let Some((end, ch)) = self.s.next() { + 
if ch == '"' { + return Some(Ok(Token::String(&self.orig[start + 1..end]))); + } + } + return Some(Err(format_err!("unterminated string in cfg"))); + } + Some((start, ch)) if is_ident_start(ch) => { + while let Some(&(end, ch)) = self.s.peek() { + if !is_ident_rest(ch) { + return Some(Ok(Token::Ident(&self.orig[start..end]))); + } else { + self.s.next(); + } + } + return Some(Ok(Token::Ident(&self.orig[start..]))); + } + Some((_, ch)) => { + return Some(Err(format_err!( + "unexpected character in \ + cfg `{}`, expected parens, \ + a comma, an identifier, or \ + a string", + ch + ))) + } + None => return None, + } + } + } +} + +fn is_ident_start(ch: char) -> bool { + ch == '_' || ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') +} + +fn is_ident_rest(ch: char) -> bool { + is_ident_start(ch) || ('0' <= ch && ch <= '9') +} + +impl<'a> Token<'a> { + fn classify(&self) -> &str { + match *self { + Token::LeftParen => "`(`", + Token::RightParen => "`)`", + Token::Ident(..) => "an identifier", + Token::Comma => "`,`", + Token::Equals => "`=`", + Token::String(..) => "a string", + } + } +} diff --git a/src/cargo/util/config.rs b/src/cargo/util/config.rs new file mode 100644 index 000000000..f3a1d7968 --- /dev/null +++ b/src/cargo/util/config.rs @@ -0,0 +1,1048 @@ +use std::cell::{RefCell, RefMut}; +use std::collections::HashSet; +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::hash_map::HashMap; +use std::env; +use std::fmt; +use std::fs::{self, File}; +use std::io::SeekFrom; +use std::io::prelude::*; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::{Once, ONCE_INIT}; +use std::time::Instant; + +use curl::easy::Easy; +use jobserver; +use serde::{Serialize, Serializer}; +use toml; +use lazycell::LazyCell; + +use core::shell::Verbosity; +use core::{CliUnstable, Shell, SourceId, Workspace}; +use ops; +use url::Url; +use util::ToUrl; +use util::Rustc; +use util::errors::{internal, CargoError, CargoResult, CargoResultExt}; +use util::paths; +use util::toml as cargo_toml; +use util::Filesystem; + +use self::ConfigValue as CV; + +/// Configuration information for cargo. This is not specific to a build, it is information +/// relating to cargo itself. +/// +/// This struct implements `Default`: all fields can be inferred. +#[derive(Debug)] +pub struct Config { + /// The location of the users's 'home' directory. OS-dependent. 
+ home_path: Filesystem, + /// Information about how to write messages to the shell + shell: RefCell, + /// Information on how to invoke the compiler (rustc) + rustc: LazyCell, + /// A collection of configuration options + values: LazyCell>, + /// The current working directory of cargo + cwd: PathBuf, + /// The location of the cargo executable (path to current process) + cargo_exe: LazyCell, + /// The location of the rustdoc executable + rustdoc: LazyCell, + /// Whether we are printing extra verbose messages + extra_verbose: bool, + /// `frozen` is set if we shouldn't access the network + frozen: bool, + /// `locked` is set if we should not update lock files + locked: bool, + /// A global static IPC control mechanism (used for managing parallel builds) + jobserver: Option, + /// Cli flags of the form "-Z something" + cli_flags: CliUnstable, + /// A handle on curl easy mode for http calls + easy: LazyCell>, + /// Cache of the `SourceId` for crates.io + crates_io_source_id: LazyCell, + /// If false, don't cache `rustc --version --verbose` invocations + cache_rustc_info: bool, + /// Creation time of this config, used to output the total build time + creation_time: Instant, + /// Target Directory via resolved Cli parameter + target_dir: Option, +} + +impl Config { + pub fn new(shell: Shell, cwd: PathBuf, homedir: PathBuf) -> Config { + static mut GLOBAL_JOBSERVER: *mut jobserver::Client = 0 as *mut _; + static INIT: Once = ONCE_INIT; + + // This should be called early on in the process, so in theory the + // unsafety is ok here. (taken ownership of random fds) + INIT.call_once(|| unsafe { + if let Some(client) = jobserver::Client::from_env() { + GLOBAL_JOBSERVER = Box::into_raw(Box::new(client)); + } + }); + + let cache_rustc_info = match env::var("CARGO_CACHE_RUSTC_INFO") { + Ok(cache) => cache != "0", + _ => true, + }; + + Config { + home_path: Filesystem::new(homedir), + shell: RefCell::new(shell), + rustc: LazyCell::new(), + cwd, + values: LazyCell::new(), + cargo_exe: LazyCell::new(), + rustdoc: LazyCell::new(), + extra_verbose: false, + frozen: false, + locked: false, + jobserver: unsafe { + if GLOBAL_JOBSERVER.is_null() { + None + } else { + Some((*GLOBAL_JOBSERVER).clone()) + } + }, + cli_flags: CliUnstable::default(), + easy: LazyCell::new(), + crates_io_source_id: LazyCell::new(), + cache_rustc_info, + creation_time: Instant::now(), + target_dir: None, + } + } + + pub fn default() -> CargoResult { + let shell = Shell::new(); + let cwd = + env::current_dir().chain_err(|| "couldn't get the current directory of the process")?; + let homedir = homedir(&cwd).ok_or_else(|| { + format_err!( + "Cargo couldn't find your home directory. \ + This probably means that $HOME was not set." 
+            )
+        })?;
+        Ok(Config::new(shell, cwd, homedir))
+    }
+
+    /// The user's cargo home directory (OS-dependent)
+    pub fn home(&self) -> &Filesystem {
+        &self.home_path
+    }
+
+    /// The cargo git directory (`<cargo_home>/git`)
+    pub fn git_path(&self) -> Filesystem {
+        self.home_path.join("git")
+    }
+
+    /// The cargo registry index directory (`<cargo_home>/registry/index`)
+    pub fn registry_index_path(&self) -> Filesystem {
+        self.home_path.join("registry").join("index")
+    }
+
+    /// The cargo registry cache directory (`<cargo_home>/registry/cache`)
+    pub fn registry_cache_path(&self) -> Filesystem {
+        self.home_path.join("registry").join("cache")
+    }
+
+    /// The cargo registry source directory (`<cargo_home>/registry/src`)
+    pub fn registry_source_path(&self) -> Filesystem {
+        self.home_path.join("registry").join("src")
+    }
+
+    /// Get a reference to the shell, e.g. for writing error messages
+    pub fn shell(&self) -> RefMut<Shell> {
+        self.shell.borrow_mut()
+    }
+
+    /// Get the path to the `rustdoc` executable
+    pub fn rustdoc(&self) -> CargoResult<&Path> {
+        self.rustdoc
+            .try_borrow_with(|| self.get_tool("rustdoc"))
+            .map(AsRef::as_ref)
+    }
+
+    /// Get the path to the `rustc` executable
+    pub fn rustc(&self, ws: Option<&Workspace>) -> CargoResult<Rustc> {
+        let cache_location = ws.map(|ws| {
+            ws.target_dir()
+                .join(".rustc_info.json")
+                .into_path_unlocked()
+        });
+        Rustc::new(
+            self.get_tool("rustc")?,
+            self.maybe_get_tool("rustc_wrapper")?,
+            &self.home()
+                .join("bin")
+                .join("rustc")
+                .into_path_unlocked()
+                .with_extension(env::consts::EXE_EXTENSION),
+            if self.cache_rustc_info {
+                cache_location
+            } else {
+                None
+            },
+        )
+    }
+
+    /// Get the path to the `cargo` executable
+    pub fn cargo_exe(&self) -> CargoResult<&Path> {
+        self.cargo_exe
+            .try_borrow_with(|| {
+                fn from_current_exe() -> CargoResult<PathBuf> {
+                    // Try fetching the path to `cargo` using env::current_exe().
+                    // The method varies per operating system and might fail; in particular,
+                    // it depends on /proc being mounted on Linux, and some environments
+                    // (like containers or chroots) may not have that available.
+                    let exe = env::current_exe()?.canonicalize()?;
+                    Ok(exe)
+                }
+
+                fn from_argv() -> CargoResult<PathBuf> {
+                    // Grab argv[0] and attempt to resolve it to an absolute path.
+                    // If argv[0] has one component, it must have come from a PATH lookup,
+                    // so probe PATH in that case.
+                    // Otherwise, it has multiple components and is either:
+                    // - a relative path (e.g. `./cargo`, `target/debug/cargo`), or
+                    // - an absolute path (e.g. `/usr/local/bin/cargo`).
+ // In either case, Path::canonicalize will return the full absolute path + // to the target if it exists + let argv0 = env::args_os() + .map(PathBuf::from) + .next() + .ok_or(format_err!("no argv[0]"))?; + paths::resolve_executable(&argv0) + } + + let exe = from_current_exe() + .or_else(|_| from_argv()) + .chain_err(|| "couldn't get the path to cargo executable")?; + Ok(exe) + }) + .map(AsRef::as_ref) + } + + pub fn values(&self) -> CargoResult<&HashMap> { + self.values.try_borrow_with(|| self.load_values()) + } + + pub fn set_values(&self, values: HashMap) -> CargoResult<()> { + if self.values.borrow().is_some() { + bail!("config values already found") + } + match self.values.fill(values) { + Ok(()) => Ok(()), + Err(_) => bail!("could not fill values"), + } + } + + pub fn cwd(&self) -> &Path { + &self.cwd + } + + pub fn target_dir(&self) -> CargoResult> { + if let Some(ref dir) = self.target_dir { + Ok(Some(dir.clone())) + } else if let Some(dir) = env::var_os("CARGO_TARGET_DIR") { + Ok(Some(Filesystem::new(self.cwd.join(dir)))) + } else if let Some(val) = self.get_path("build.target-dir")? { + let val = self.cwd.join(val.val); + Ok(Some(Filesystem::new(val))) + } else { + Ok(None) + } + } + + fn get(&self, key: &str) -> CargoResult> { + let vals = self.values()?; + let mut parts = key.split('.').enumerate(); + let mut val = match vals.get(parts.next().unwrap().1) { + Some(val) => val, + None => return Ok(None), + }; + for (i, part) in parts { + match *val { + CV::Table(ref map, _) => { + val = match map.get(part) { + Some(val) => val, + None => return Ok(None), + } + } + CV::Integer(_, ref path) + | CV::String(_, ref path) + | CV::List(_, ref path) + | CV::Boolean(_, ref path) => { + let idx = key.split('.').take(i).fold(0, |n, s| n + s.len()) + i - 1; + let key_so_far = &key[..idx]; + bail!( + "expected table for configuration key `{}`, \ + but found {} in {}", + key_so_far, + val.desc(), + path.display() + ) + } + } + } + Ok(Some(val.clone())) + } + + fn get_env(&self, key: &str) -> CargoResult>> + where + CargoError: From, + { + let key = key.replace(".", "_") + .replace("-", "_") + .chars() + .flat_map(|c| c.to_uppercase()) + .collect::(); + match env::var(&format!("CARGO_{}", key)) { + Ok(value) => Ok(Some(Value { + val: value.parse()?, + definition: Definition::Environment, + })), + Err(..) => Ok(None), + } + } + + pub fn get_string(&self, key: &str) -> CargoResult>> { + if let Some(v) = self.get_env(key)? { + return Ok(Some(v)); + } + match self.get(key)? { + Some(CV::String(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(val) => self.expected("string", key, val), + None => Ok(None), + } + } + + pub fn get_bool(&self, key: &str) -> CargoResult>> { + if let Some(v) = self.get_env(key)? { + return Ok(Some(v)); + } + match self.get(key)? { + Some(CV::Boolean(b, path)) => Ok(Some(Value { + val: b, + definition: Definition::Path(path), + })), + Some(val) => self.expected("bool", key, val), + None => Ok(None), + } + } + + fn string_to_path(&self, value: String, definition: &Definition) -> PathBuf { + let is_path = value.contains('/') || (cfg!(windows) && value.contains('\\')); + if is_path { + definition.root(self).join(value) + } else { + // A pathless name + PathBuf::from(value) + } + } + + pub fn get_path(&self, key: &str) -> CargoResult>> { + if let Some(val) = self.get_string(key)? 
{ + Ok(Some(Value { + val: self.string_to_path(val.val, &val.definition), + definition: val.definition, + })) + } else { + Ok(None) + } + } + + pub fn get_path_and_args( + &self, + key: &str, + ) -> CargoResult)>>> { + if let Some(mut val) = self.get_list_or_split_string(key)? { + if !val.val.is_empty() { + return Ok(Some(Value { + val: ( + self.string_to_path(val.val.remove(0), &val.definition), + val.val, + ), + definition: val.definition, + })); + } + } + Ok(None) + } + + pub fn get_list(&self, key: &str) -> CargoResult>>> { + match self.get(key)? { + Some(CV::List(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(val) => self.expected("list", key, val), + None => Ok(None), + } + } + + pub fn get_list_or_split_string(&self, key: &str) -> CargoResult>>> { + match self.get_env::(key) { + Ok(Some(value)) => { + return Ok(Some(Value { + val: value.val.split(' ').map(str::to_string).collect(), + definition: value.definition, + })) + } + Err(err) => return Err(err), + Ok(None) => (), + } + + match self.get(key)? { + Some(CV::List(i, path)) => Ok(Some(Value { + val: i.into_iter().map(|(s, _)| s).collect(), + definition: Definition::Path(path), + })), + Some(CV::String(i, path)) => Ok(Some(Value { + val: i.split(' ').map(str::to_string).collect(), + definition: Definition::Path(path), + })), + Some(val) => self.expected("list or string", key, val), + None => Ok(None), + } + } + + pub fn get_table(&self, key: &str) -> CargoResult>>> { + match self.get(key)? { + Some(CV::Table(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(val) => self.expected("table", key, val), + None => Ok(None), + } + } + + pub fn get_i64(&self, key: &str) -> CargoResult>> { + if let Some(v) = self.get_env(key)? { + return Ok(Some(v)); + } + match self.get(key)? { + Some(CV::Integer(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(val) => self.expected("integer", key, val), + None => Ok(None), + } + } + + pub fn net_retry(&self) -> CargoResult { + match self.get_i64("net.retry")? { + Some(v) => { + let value = v.val; + if value < 0 { + bail!( + "net.retry must be positive, but found {} in {}", + v.val, + v.definition + ) + } else { + Ok(value) + } + } + None => Ok(2), + } + } + + pub fn expected(&self, ty: &str, key: &str, val: CV) -> CargoResult { + val.expected(ty, key) + .map_err(|e| format_err!("invalid configuration for key `{}`\n{}", key, e)) + } + + pub fn configure( + &mut self, + verbose: u32, + quiet: Option, + color: &Option, + frozen: bool, + locked: bool, + target_dir: &Option, + unstable_flags: &[String], + ) -> CargoResult<()> { + let extra_verbose = verbose >= 2; + let verbose = if verbose == 0 { None } else { Some(true) }; + + // Ignore errors in the configuration files. + let cfg_verbose = self.get_bool("term.verbose").unwrap_or(None).map(|v| v.val); + let cfg_color = self.get_string("term.color").unwrap_or(None).map(|v| v.val); + + let color = color.as_ref().or_else(|| cfg_color.as_ref()); + + let verbosity = match (verbose, cfg_verbose, quiet) { + (Some(true), _, None) | (None, Some(true), None) => Verbosity::Verbose, + + // command line takes precedence over configuration, so ignore the + // configuration. + (None, _, Some(true)) => Verbosity::Quiet, + + // Can't pass both at the same time on the command line regardless + // of configuration. 
+ (Some(true), _, Some(true)) => { + bail!("cannot set both --verbose and --quiet"); + } + + // Can't actually get `Some(false)` as a value from the command + // line, so just ignore them here to appease exhaustiveness checking + // in match statements. + (Some(false), _, _) + | (_, _, Some(false)) + | (None, Some(false), None) + | (None, None, None) => Verbosity::Normal, + }; + + let cli_target_dir = match target_dir.as_ref() { + Some(dir) => Some(Filesystem::new(dir.clone())), + None => None, + }; + + self.shell().set_verbosity(verbosity); + self.shell().set_color_choice(color.map(|s| &s[..]))?; + self.extra_verbose = extra_verbose; + self.frozen = frozen; + self.locked = locked; + self.target_dir = cli_target_dir; + self.cli_flags.parse(unstable_flags)?; + + Ok(()) + } + + pub fn cli_unstable(&self) -> &CliUnstable { + &self.cli_flags + } + + pub fn extra_verbose(&self) -> bool { + self.extra_verbose + } + + pub fn network_allowed(&self) -> bool { + !self.frozen() && !self.cli_unstable().offline + } + + pub fn frozen(&self) -> bool { + self.frozen + } + + pub fn lock_update_allowed(&self) -> bool { + !self.frozen && !self.locked + } + + /// Loads configuration from the filesystem + pub fn load_values(&self) -> CargoResult> { + let mut cfg = CV::Table(HashMap::new(), PathBuf::from(".")); + + walk_tree(&self.cwd, |path| { + let mut contents = String::new(); + let mut file = File::open(&path)?; + file.read_to_string(&mut contents) + .chain_err(|| format!("failed to read configuration file `{}`", path.display()))?; + let toml = cargo_toml::parse(&contents, path, self).chain_err(|| { + format!("could not parse TOML configuration in `{}`", path.display()) + })?; + let value = CV::from_toml(path, toml).chain_err(|| { + format!( + "failed to load TOML configuration from `{}`", + path.display() + ) + })?; + cfg.merge(value) + .chain_err(|| format!("failed to merge configuration at `{}`", path.display()))?; + Ok(()) + }).chain_err(|| "could not load Cargo configuration")?; + + self.load_credentials(&mut cfg)?; + match cfg { + CV::Table(map, _) => Ok(map), + _ => unreachable!(), + } + } + + /// Gets the index for a registry. + pub fn get_registry_index(&self, registry: &str) -> CargoResult { + Ok( + match self.get_string(&format!("registries.{}.index", registry))? { + Some(index) => { + let url = index.val.to_url()?; + if url.username() != "" || url.password().is_some() { + bail!("Registry URLs may not contain credentials"); + } + url + } + None => bail!("No index found for registry: `{}`", registry), + }, + ) + } + + /// Loads credentials config from the credentials file into the ConfigValue object, if present. 
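
Before the credentials loader, it is worth making concrete how all of the getters above fall back to the environment: `get_env` derives a variable name from the dotted key. A standalone sketch of that mangling (the helper name here is invented; the real logic is inlined in `Config::get_env`):

    // Mirrors the key mangling in `Config::get_env`: dots and hyphens
    // become underscores, the result is uppercased and prefixed with CARGO_.
    fn config_key_to_env(key: &str) -> String {
        let upper = key
            .replace(".", "_")
            .replace("-", "_")
            .chars()
            .flat_map(|c| c.to_uppercase())
            .collect::<String>();
        format!("CARGO_{}", upper)
    }

    fn main() {
        // So `build.target-dir` can be overridden by CARGO_BUILD_TARGET_DIR,
        // and a custom registry index by CARGO_REGISTRIES_MY_REGISTRY_INDEX.
        assert_eq!(config_key_to_env("build.target-dir"), "CARGO_BUILD_TARGET_DIR");
        assert_eq!(
            config_key_to_env("registries.my-registry.index"),
            "CARGO_REGISTRIES_MY_REGISTRY_INDEX"
        );
    }
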
+ fn load_credentials(&self, cfg: &mut ConfigValue) -> CargoResult<()> { + let home_path = self.home_path.clone().into_path_unlocked(); + let credentials = home_path.join("credentials"); + if !fs::metadata(&credentials).is_ok() { + return Ok(()); + } + + let mut contents = String::new(); + let mut file = File::open(&credentials)?; + file.read_to_string(&mut contents).chain_err(|| { + format!( + "failed to read configuration file `{}`", + credentials.display() + ) + })?; + + let toml = cargo_toml::parse(&contents, &credentials, self).chain_err(|| { + format!( + "could not parse TOML configuration in `{}`", + credentials.display() + ) + })?; + + let mut value = CV::from_toml(&credentials, toml).chain_err(|| { + format!( + "failed to load TOML configuration from `{}`", + credentials.display() + ) + })?; + + // backwards compatibility for old .cargo/credentials layout + { + let value = match value { + CV::Table(ref mut value, _) => value, + _ => unreachable!(), + }; + + if let Some(token) = value.remove("token") { + if let Vacant(entry) = value.entry("registry".into()) { + let mut map = HashMap::new(); + map.insert("token".into(), token); + let table = CV::Table(map, PathBuf::from(".")); + entry.insert(table); + } + } + } + + // we want value to override cfg, so swap these + mem::swap(cfg, &mut value); + cfg.merge(value)?; + + Ok(()) + } + + /// Look for a path for `tool` in an environment variable or config path, but return `None` + /// if it's not present. + fn maybe_get_tool(&self, tool: &str) -> CargoResult> { + let var = tool.chars() + .flat_map(|c| c.to_uppercase()) + .collect::(); + if let Some(tool_path) = env::var_os(&var) { + let maybe_relative = match tool_path.to_str() { + Some(s) => s.contains("/") || s.contains("\\"), + None => false, + }; + let path = if maybe_relative { + self.cwd.join(tool_path) + } else { + PathBuf::from(tool_path) + }; + return Ok(Some(path)); + } + + let var = format!("build.{}", tool); + if let Some(tool_path) = self.get_path(&var)? { + return Ok(Some(tool_path.val)); + } + + Ok(None) + } + + /// Look for a path for `tool` in an environment variable or config path, defaulting to `tool` + /// as a path. 
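
A note on the backwards-compatibility shim in `load_credentials` above: a bare top-level `token` in an older credentials file is rewritten under a `[registry]` table. The two layouts carry the same data, sketched here with the `toml` crate this file already depends on (the token value is invented):

    extern crate toml;

    fn main() {
        // Older credentials layout: a bare, top-level `token` key.
        let old: toml::Value = "token = 'abc123'".parse().unwrap();
        // What `load_credentials` migrates it to:
        let new: toml::Value = "[registry]\ntoken = 'abc123'".parse().unwrap();
        assert_eq!(old["token"], new["registry"]["token"]);
    }
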
+ fn get_tool(&self, tool: &str) -> CargoResult { + self.maybe_get_tool(tool) + .map(|t| t.unwrap_or_else(|| PathBuf::from(tool))) + } + + pub fn jobserver_from_env(&self) -> Option<&jobserver::Client> { + self.jobserver.as_ref() + } + + pub fn http(&self) -> CargoResult<&RefCell> { + let http = self.easy + .try_borrow_with(|| ops::http_handle(self).map(RefCell::new))?; + { + let mut http = http.borrow_mut(); + http.reset(); + ops::configure_http_handle(self, &mut http)?; + } + Ok(http) + } + + pub fn crates_io_source_id(&self, f: F) -> CargoResult + where + F: FnMut() -> CargoResult, + { + Ok(self.crates_io_source_id.try_borrow_with(f)?.clone()) + } + + pub fn creation_time(&self) -> Instant { + self.creation_time + } +} + +#[derive(Eq, PartialEq, Clone, Copy)] +pub enum Location { + Project, + Global, +} + +#[derive(Eq, PartialEq, Clone, Deserialize)] +pub enum ConfigValue { + Integer(i64, PathBuf), + String(String, PathBuf), + List(Vec<(String, PathBuf)>, PathBuf), + Table(HashMap, PathBuf), + Boolean(bool, PathBuf), +} + +pub struct Value { + pub val: T, + pub definition: Definition, +} + +pub enum Definition { + Path(PathBuf), + Environment, +} + +impl fmt::Debug for ConfigValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CV::Integer(i, ref path) => write!(f, "{} (from {})", i, path.display()), + CV::Boolean(b, ref path) => write!(f, "{} (from {})", b, path.display()), + CV::String(ref s, ref path) => write!(f, "{} (from {})", s, path.display()), + CV::List(ref list, ref path) => { + write!(f, "[")?; + for (i, &(ref s, ref path)) in list.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{} (from {})", s, path.display())?; + } + write!(f, "] (from {})", path.display()) + } + CV::Table(ref table, _) => write!(f, "{:?}", table), + } + } +} + +impl Serialize for ConfigValue { + fn serialize(&self, s: S) -> Result { + match *self { + CV::String(ref string, _) => string.serialize(s), + CV::List(ref list, _) => { + let list: Vec<&String> = list.iter().map(|s| &s.0).collect(); + list.serialize(s) + } + CV::Table(ref table, _) => table.serialize(s), + CV::Boolean(b, _) => b.serialize(s), + CV::Integer(i, _) => i.serialize(s), + } + } +} + +impl ConfigValue { + fn from_toml(path: &Path, toml: toml::Value) -> CargoResult { + match toml { + toml::Value::String(val) => Ok(CV::String(val, path.to_path_buf())), + toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.to_path_buf())), + toml::Value::Integer(i) => Ok(CV::Integer(i, path.to_path_buf())), + toml::Value::Array(val) => Ok(CV::List( + val.into_iter() + .map(|toml| match toml { + toml::Value::String(val) => Ok((val, path.to_path_buf())), + v => bail!("expected string but found {} in list", v.type_str()), + }) + .collect::>()?, + path.to_path_buf(), + )), + toml::Value::Table(val) => Ok(CV::Table( + val.into_iter() + .map(|(key, value)| { + let value = CV::from_toml(path, value) + .chain_err(|| format!("failed to parse key `{}`", key))?; + Ok((key, value)) + }) + .collect::>()?, + path.to_path_buf(), + )), + v => bail!( + "found TOML configuration value of unknown type `{}`", + v.type_str() + ), + } + } + + fn into_toml(self) -> toml::Value { + match self { + CV::Boolean(s, _) => toml::Value::Boolean(s), + CV::String(s, _) => toml::Value::String(s), + CV::Integer(i, _) => toml::Value::Integer(i), + CV::List(l, _) => { + toml::Value::Array(l.into_iter().map(|(s, _)| toml::Value::String(s)).collect()) + } + CV::Table(l, _) => { + toml::Value::Table(l.into_iter().map(|(k, v)| (k, 
v.into_toml())).collect()) + } + } + } + + fn merge(&mut self, from: ConfigValue) -> CargoResult<()> { + match (self, from) { + (&mut CV::String(..), CV::String(..)) + | (&mut CV::Integer(..), CV::Integer(..)) + | (&mut CV::Boolean(..), CV::Boolean(..)) => {} + (&mut CV::List(ref mut old, _), CV::List(ref mut new, _)) => { + let new = mem::replace(new, Vec::new()); + old.extend(new.into_iter()); + } + (&mut CV::Table(ref mut old, _), CV::Table(ref mut new, _)) => { + let new = mem::replace(new, HashMap::new()); + for (key, value) in new { + match old.entry(key.clone()) { + Occupied(mut entry) => { + let path = value.definition_path().to_path_buf(); + let entry = entry.get_mut(); + entry.merge(value).chain_err(|| { + format!( + "failed to merge key `{}` between \ + files:\n \ + file 1: {}\n \ + file 2: {}", + key, + entry.definition_path().display(), + path.display() + ) + })?; + } + Vacant(entry) => { + entry.insert(value); + } + }; + } + } + (expected, found) => { + return Err(internal(format!( + "expected {}, but found {}", + expected.desc(), + found.desc() + ))) + } + } + + Ok(()) + } + + pub fn i64(&self, key: &str) -> CargoResult<(i64, &Path)> { + match *self { + CV::Integer(i, ref p) => Ok((i, p)), + _ => self.expected("integer", key), + } + } + + pub fn string(&self, key: &str) -> CargoResult<(&str, &Path)> { + match *self { + CV::String(ref s, ref p) => Ok((s, p)), + _ => self.expected("string", key), + } + } + + pub fn table(&self, key: &str) -> CargoResult<(&HashMap, &Path)> { + match *self { + CV::Table(ref table, ref p) => Ok((table, p)), + _ => self.expected("table", key), + } + } + + pub fn list(&self, key: &str) -> CargoResult<&[(String, PathBuf)]> { + match *self { + CV::List(ref list, _) => Ok(list), + _ => self.expected("list", key), + } + } + + pub fn boolean(&self, key: &str) -> CargoResult<(bool, &Path)> { + match *self { + CV::Boolean(b, ref p) => Ok((b, p)), + _ => self.expected("bool", key), + } + } + + pub fn desc(&self) -> &'static str { + match *self { + CV::Table(..) => "table", + CV::List(..) => "array", + CV::String(..) => "string", + CV::Boolean(..) => "boolean", + CV::Integer(..) 
=> "integer", + } + } + + pub fn definition_path(&self) -> &Path { + match *self { + CV::Boolean(_, ref p) + | CV::Integer(_, ref p) + | CV::String(_, ref p) + | CV::List(_, ref p) + | CV::Table(_, ref p) => p, + } + } + + pub fn expected(&self, wanted: &str, key: &str) -> CargoResult { + bail!( + "expected a {}, but found a {} for `{}` in {}", + wanted, + self.desc(), + key, + self.definition_path().display() + ) + } +} + +impl Definition { + pub fn root<'a>(&'a self, config: &'a Config) -> &'a Path { + match *self { + Definition::Path(ref p) => p.parent().unwrap().parent().unwrap(), + Definition::Environment => config.cwd(), + } + } +} + +impl fmt::Display for Definition { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Definition::Path(ref p) => p.display().fmt(f), + Definition::Environment => "the environment".fmt(f), + } + } +} + +pub fn homedir(cwd: &Path) -> Option { + ::home::cargo_home_with_cwd(cwd).ok() +} + +fn walk_tree(pwd: &Path, mut walk: F) -> CargoResult<()> +where + F: FnMut(&Path) -> CargoResult<()>, +{ + let mut stash: HashSet = HashSet::new(); + + for current in paths::ancestors(pwd) { + let possible = current.join(".cargo").join("config"); + if fs::metadata(&possible).is_ok() { + walk(&possible)?; + stash.insert(possible); + } + } + + // Once we're done, also be sure to walk the home directory even if it's not + // in our history to be sure we pick up that standard location for + // information. + let home = homedir(pwd).ok_or_else(|| { + format_err!( + "Cargo couldn't find your home directory. \ + This probably means that $HOME was not set." + ) + })?; + let config = home.join("config"); + if !stash.contains(&config) && fs::metadata(&config).is_ok() { + walk(&config)?; + } + + Ok(()) +} + +pub fn save_credentials(cfg: &Config, token: String, registry: Option) -> CargoResult<()> { + let mut file = { + cfg.home_path.create_dir()?; + cfg.home_path + .open_rw(Path::new("credentials"), cfg, "credentials' config file")? 
+ }; + + let (key, value) = { + let key = "token".to_string(); + let value = ConfigValue::String(token, file.path().to_path_buf()); + let mut map = HashMap::new(); + map.insert(key, value); + let table = CV::Table(map, file.path().to_path_buf()); + + if let Some(registry) = registry { + let mut map = HashMap::new(); + map.insert(registry, table); + ( + "registries".into(), + CV::Table(map, file.path().to_path_buf()), + ) + } else { + ("registry".into(), table) + } + }; + + let mut contents = String::new(); + file.read_to_string(&mut contents).chain_err(|| { + format!( + "failed to read configuration file `{}`", + file.path().display() + ) + })?; + + let mut toml = cargo_toml::parse(&contents, file.path(), cfg)?; + + // move the old token location to the new one + if let Some(token) = toml.as_table_mut().unwrap().remove("token") { + let mut map = HashMap::new(); + map.insert("token".to_string(), token); + toml.as_table_mut() + .unwrap() + .insert("registry".into(), map.into()); + } + + toml.as_table_mut().unwrap().insert(key, value.into_toml()); + + let contents = toml.to_string(); + file.seek(SeekFrom::Start(0))?; + file.write_all(contents.as_bytes())?; + file.file().set_len(contents.len() as u64)?; + set_permissions(file.file(), 0o600)?; + + return Ok(()); + + #[cfg(unix)] + fn set_permissions(file: &File, mode: u32) -> CargoResult<()> { + use std::os::unix::fs::PermissionsExt; + + let mut perms = file.metadata()?.permissions(); + perms.set_mode(mode); + file.set_permissions(perms)?; + Ok(()) + } + + #[cfg(not(unix))] + #[allow(unused)] + fn set_permissions(file: &File, mode: u32) -> CargoResult<()> { + Ok(()) + } +} diff --git a/src/cargo/util/dependency_queue.rs b/src/cargo/util/dependency_queue.rs new file mode 100644 index 000000000..a52c410e8 --- /dev/null +++ b/src/cargo/util/dependency_queue.rs @@ -0,0 +1,231 @@ +//! A graph-like structure used to represent a set of dependencies and in what +//! order they should be built. +//! +//! This structure is used to store the dependency graph and dynamically update +//! it to figure out when a dependency should be built. + +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::{HashMap, HashSet}; +use std::hash::Hash; + +pub use self::Freshness::{Dirty, Fresh}; + +#[derive(Debug)] +pub struct DependencyQueue { + /// A list of all known keys to build. + /// + /// The value of the hash map is list of dependencies which still need to be + /// built before the package can be built. Note that the set is dynamically + /// updated as more dependencies are built. + dep_map: HashMap, V)>, + + /// A reverse mapping of a package to all packages that depend on that + /// package. + /// + /// This map is statically known and does not get updated throughout the + /// lifecycle of the DependencyQueue. + reverse_dep_map: HashMap>, + + /// A set of dirty packages. + /// + /// Packages may become dirty over time if their dependencies are rebuilt. + dirty: HashSet, + + /// The packages which are currently being built, waiting for a call to + /// `finish`. + pending: HashSet, + + /// Topological depth of each key + depth: HashMap, +} + +/// Indication of the freshness of a package. +/// +/// A fresh package does not necessarily need to be rebuilt (unless a dependency +/// was also rebuilt), and a dirty package must always be rebuilt. 
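
Before the enum itself, the algebra it obeys deserves a line: freshness propagates like a meet operation in which `Dirty` is absorbing. A small illustrative test (editorial sketch; `combine` is defined just below):

    #[cfg(test)]
    mod freshness_algebra {
        use super::Freshness::{Dirty, Fresh};

        #[test]
        fn dirty_is_absorbing() {
            // A package is only as fresh as the dirtiest input feeding it.
            assert_eq!(Fresh.combine(Fresh), Fresh);
            assert_eq!(Fresh.combine(Dirty), Dirty);
            assert_eq!(Dirty.combine(Fresh), Dirty);
        }
    }
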
+#[derive(PartialEq, Eq, Debug, Clone, Copy)] +pub enum Freshness { + Fresh, + Dirty, +} + +impl Freshness { + pub fn combine(&self, other: Freshness) -> Freshness { + match *self { + Fresh => other, + Dirty => Dirty, + } + } +} + +impl Default for DependencyQueue { + fn default() -> DependencyQueue { + DependencyQueue::new() + } +} + +impl DependencyQueue { + /// Creates a new dependency queue with 0 packages. + pub fn new() -> DependencyQueue { + DependencyQueue { + dep_map: HashMap::new(), + reverse_dep_map: HashMap::new(), + dirty: HashSet::new(), + pending: HashSet::new(), + depth: HashMap::new(), + } + } + + /// Adds a new package to this dependency queue. + /// + /// It is assumed that any dependencies of this package will eventually also + /// be added to the dependency queue. + pub fn queue(&mut self, fresh: Freshness, key: K, value: V, dependencies: &[K]) -> &mut V { + let slot = match self.dep_map.entry(key.clone()) { + Occupied(v) => return &mut v.into_mut().1, + Vacant(v) => v, + }; + + if fresh == Dirty { + self.dirty.insert(key.clone()); + } + + let mut my_dependencies = HashSet::new(); + for dep in dependencies { + my_dependencies.insert(dep.clone()); + let rev = self.reverse_dep_map + .entry(dep.clone()) + .or_insert_with(HashSet::new); + rev.insert(key.clone()); + } + &mut slot.insert((my_dependencies, value)).1 + } + + /// All nodes have been added, calculate some internal metadata and prepare + /// for `dequeue`. + pub fn queue_finished(&mut self) { + for key in self.dep_map.keys() { + depth(key, &self.reverse_dep_map, &mut self.depth); + } + + fn depth( + key: &K, + map: &HashMap>, + results: &mut HashMap, + ) -> usize { + const IN_PROGRESS: usize = !0; + + if let Some(&depth) = results.get(key) { + assert_ne!(depth, IN_PROGRESS, "cycle in DependencyQueue"); + return depth; + } + + results.insert(key.clone(), IN_PROGRESS); + + let depth = 1 + + map.get(&key) + .into_iter() + .flat_map(|it| it) + .map(|dep| depth(dep, map, results)) + .max() + .unwrap_or(0); + + *results.get_mut(key).unwrap() = depth; + + depth + } + } + + /// Dequeues a package that is ready to be built. + /// + /// A package is ready to be built when it has 0 un-built dependencies. If + /// `None` is returned then no packages are ready to be built. + pub fn dequeue(&mut self) -> Option<(Freshness, K, V)> { + // Look at all our crates and find everything that's ready to build (no + // deps). After we've got that candidate set select the one which has + // the maximum depth in the dependency graph. This way we should + // hopefully keep CPUs hottest the longest by ensuring that long + // dependency chains are scheduled early on in the build process and the + // leafs higher in the tree can fill in the cracks later. + // + // TODO: it'd be best here to throw in a heuristic of crate size as + // well. For example how long did this crate historically take to + // compile? How large is its source code? etc. + let next = self.dep_map + .iter() + .filter(|&(_, &(ref deps, _))| deps.is_empty()) + .map(|(key, _)| key.clone()) + .max_by_key(|k| self.depth[k]); + let key = match next { + Some(key) => key, + None => return None, + }; + let (_, data) = self.dep_map.remove(&key).unwrap(); + let fresh = if self.dirty.contains(&key) { + Dirty + } else { + Fresh + }; + self.pending.insert(key.clone()); + Some((fresh, key, data)) + } + + /// Returns whether there are remaining packages to be built. 
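
The max-depth choice in `dequeue` above is easiest to see on the graph that the `deep_first` test builds at the bottom of this file; a sketch of the arithmetic plus a minimal check mirroring that test:

    #[cfg(test)]
    mod depth_heuristic {
        use super::DependencyQueue;
        use super::Freshness::Fresh;

        // Reverse deps: 1 -> {2}, 2 -> {4}, 3 -> {4, 5}, 4 -> {5}, 5 -> {}.
        // depth(5) = 1, depth(4) = 2, depth(2) = 3, depth(3) = 3, depth(1) = 4.
        // Keys 1 and 3 are both ready at the start; 1 wins on depth, keeping
        // the long chain 1 -> 2 -> 4 -> 5 scheduled as early as possible.
        #[test]
        fn deepest_ready_key_first() {
            let mut q = DependencyQueue::new();
            q.queue(Fresh, 1, (), &[]);
            q.queue(Fresh, 2, (), &[1]);
            q.queue(Fresh, 3, (), &[]);
            q.queue(Fresh, 4, (), &[2, 3]);
            q.queue(Fresh, 5, (), &[4, 3]);
            q.queue_finished();
            assert_eq!(q.dequeue().map(|(_, key, _)| key), Some(1));
        }
    }
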
+ pub fn is_empty(&self) -> bool { + self.dep_map.is_empty() && self.pending.is_empty() + } + + /// Returns the number of remaining packages to be built. + pub fn len(&self) -> usize { + self.dep_map.len() + self.pending.len() + } + + /// Indicate that a package has been built. + /// + /// This function will update the dependency queue with this information, + /// possibly allowing the next invocation of `dequeue` to return a package. + pub fn finish(&mut self, key: &K, fresh: Freshness) { + assert!(self.pending.remove(key)); + let reverse_deps = match self.reverse_dep_map.get(key) { + Some(deps) => deps, + None => return, + }; + for dep in reverse_deps.iter() { + if fresh == Dirty { + self.dirty.insert(dep.clone()); + } + assert!(self.dep_map.get_mut(dep).unwrap().0.remove(key)); + } + } +} + +#[cfg(test)] +mod test { + use super::{DependencyQueue, Freshness}; + + #[test] + fn deep_first() { + let mut q = DependencyQueue::new(); + + q.queue(Freshness::Fresh, 1, (), &[]); + q.queue(Freshness::Fresh, 2, (), &[1]); + q.queue(Freshness::Fresh, 3, (), &[]); + q.queue(Freshness::Fresh, 4, (), &[2, 3]); + q.queue(Freshness::Fresh, 5, (), &[4, 3]); + q.queue_finished(); + + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 1, ()))); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 3, ()))); + assert_eq!(q.dequeue(), None); + q.finish(&3, Freshness::Fresh); + assert_eq!(q.dequeue(), None); + q.finish(&1, Freshness::Fresh); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 2, ()))); + assert_eq!(q.dequeue(), None); + q.finish(&2, Freshness::Fresh); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 4, ()))); + assert_eq!(q.dequeue(), None); + q.finish(&4, Freshness::Fresh); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 5, ()))); + } +} diff --git a/src/cargo/util/errors.rs b/src/cargo/util/errors.rs new file mode 100644 index 000000000..d3b1d43d0 --- /dev/null +++ b/src/cargo/util/errors.rs @@ -0,0 +1,283 @@ +#![allow(unknown_lints)] + +use std::fmt; +use std::process::{ExitStatus, Output}; +use std::str; + +use core::{TargetKind, Workspace}; +use failure::{Context, Error, Fail}; +use clap; + +pub use failure::Error as CargoError; +pub type CargoResult = Result; + +pub trait CargoResultExt { + fn chain_err(self, f: F) -> Result> + where + F: FnOnce() -> D, + D: fmt::Display + Send + Sync + 'static; +} + +impl CargoResultExt for Result +where + E: Into, +{ + fn chain_err(self, f: F) -> Result> + where + F: FnOnce() -> D, + D: fmt::Display + Send + Sync + 'static, + { + self.map_err(|failure| { + let context = f(); + failure.into().context(context) + }) + } +} + +#[derive(Debug, Fail)] +#[fail(display = "failed to get 200 response from `{}`, got {}", url, code)] +pub struct HttpNot200 { + pub code: u32, + pub url: String, +} + +pub struct Internal { + inner: Error, +} + +impl Internal { + pub fn new(inner: Error) -> Internal { + Internal { inner } + } +} + +impl Fail for Internal { + fn cause(&self) -> Option<&Fail> { + self.inner.cause().cause() + } +} + +impl fmt::Debug for Internal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl fmt::Display for Internal { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} + +// ============================================================================= +// Process errors +#[derive(Debug, Fail)] +#[fail(display = "{}", desc)] +pub struct ProcessError { + pub desc: String, + pub exit: Option, + pub output: Option, +} + +// 
============================================================================= +// Cargo test errors. + +/// Error when testcases fail +#[derive(Debug, Fail)] +#[fail(display = "{}", desc)] +pub struct CargoTestError { + pub test: Test, + pub desc: String, + pub exit: Option, + pub causes: Vec, +} + +#[derive(Debug)] +pub enum Test { + Multiple, + Doc, + UnitTest { + kind: TargetKind, + name: String, + pkg_name: String, + }, +} + +impl CargoTestError { + pub fn new(test: Test, errors: Vec) -> Self { + if errors.is_empty() { + panic!("Cannot create CargoTestError from empty Vec") + } + let desc = errors + .iter() + .map(|error| error.desc.clone()) + .collect::>() + .join("\n"); + CargoTestError { + test, + desc, + exit: errors[0].exit, + causes: errors, + } + } + + pub fn hint(&self, ws: &Workspace) -> String { + match self.test { + Test::UnitTest { + ref kind, + ref name, + ref pkg_name, + } => { + let pkg_info = if ws.members().count() > 1 && ws.is_virtual() { + format!("-p {} ", pkg_name) + } else { + String::new() + }; + + match *kind { + TargetKind::Bench => { + format!("test failed, to rerun pass '{}--bench {}'", pkg_info, name) + } + TargetKind::Bin => { + format!("test failed, to rerun pass '{}--bin {}'", pkg_info, name) + } + TargetKind::Lib(_) => format!("test failed, to rerun pass '{}--lib'", pkg_info), + TargetKind::Test => { + format!("test failed, to rerun pass '{}--test {}'", pkg_info, name) + } + TargetKind::ExampleBin | TargetKind::ExampleLib(_) => { + format!("test failed, to rerun pass '{}--example {}", pkg_info, name) + } + _ => "test failed.".into(), + } + } + Test::Doc => "test failed, to rerun pass '--doc'".into(), + _ => "test failed.".into(), + } + } +} + +// ============================================================================= +// CLI errors + +pub type CliResult = Result<(), CliError>; + +#[derive(Debug)] +pub struct CliError { + pub error: Option, + pub unknown: bool, + pub exit_code: i32, +} + +impl CliError { + pub fn new(error: CargoError, code: i32) -> CliError { + let unknown = error.downcast_ref::().is_some(); + CliError { + error: Some(error), + exit_code: code, + unknown, + } + } + + pub fn code(code: i32) -> CliError { + CliError { + error: None, + exit_code: code, + unknown: false, + } + } +} + +impl From for CliError { + fn from(err: CargoError) -> CliError { + CliError::new(err, 101) + } +} + +impl From for CliError { + fn from(err: clap::Error) -> CliError { + let code = if err.use_stderr() { 1 } else { 0 }; + CliError::new(err.into(), code) + } +} + +// ============================================================================= +// Construction helpers + +pub fn process_error( + msg: &str, + status: Option<&ExitStatus>, + output: Option<&Output>, +) -> ProcessError { + let exit = match status { + Some(s) => status_to_string(s), + None => "never executed".to_string(), + }; + let mut desc = format!("{} ({})", &msg, exit); + + if let Some(out) = output { + match str::from_utf8(&out.stdout) { + Ok(s) if !s.trim().is_empty() => { + desc.push_str("\n--- stdout\n"); + desc.push_str(s); + } + Ok(..) | Err(..) => {} + } + match str::from_utf8(&out.stderr) { + Ok(s) if !s.trim().is_empty() => { + desc.push_str("\n--- stderr\n"); + desc.push_str(s); + } + Ok(..) | Err(..) 
=> {}
+        }
+    }
+
+    return ProcessError {
+        desc,
+        exit: status.cloned(),
+        output: output.cloned(),
+    };
+
+    #[cfg(unix)]
+    fn status_to_string(status: &ExitStatus) -> String {
+        use std::os::unix::process::*;
+        use libc;
+
+        if let Some(signal) = status.signal() {
+            let name = match signal as libc::c_int {
+                libc::SIGABRT => ", SIGABRT: process abort signal",
+                libc::SIGALRM => ", SIGALRM: alarm clock",
+                libc::SIGFPE => ", SIGFPE: erroneous arithmetic operation",
+                libc::SIGHUP => ", SIGHUP: hangup",
+                libc::SIGILL => ", SIGILL: illegal instruction",
+                libc::SIGINT => ", SIGINT: terminal interrupt signal",
+                libc::SIGKILL => ", SIGKILL: kill",
+                libc::SIGPIPE => ", SIGPIPE: write on a pipe with no one to read",
+                libc::SIGQUIT => ", SIGQUIT: terminal quit signal",
+                libc::SIGSEGV => ", SIGSEGV: invalid memory reference",
+                libc::SIGTERM => ", SIGTERM: termination signal",
+                libc::SIGBUS => ", SIGBUS: access to undefined memory",
+                #[cfg(not(target_os = "haiku"))]
+                libc::SIGSYS => ", SIGSYS: bad system call",
+                libc::SIGTRAP => ", SIGTRAP: trace/breakpoint trap",
+                _ => "",
+            };
+            format!("signal: {}{}", signal, name)
+        } else {
+            status.to_string()
+        }
+    }
+
+    #[cfg(windows)]
+    fn status_to_string(status: &ExitStatus) -> String {
+        status.to_string()
+    }
+}
+
+pub fn internal<S: fmt::Display>(error: S) -> CargoError {
+    _internal(&error)
+}
+
+fn _internal(error: &fmt::Display) -> CargoError {
+    Internal::new(format_err!("{}", error)).into()
+}
diff --git a/src/cargo/util/flock.rs b/src/cargo/util/flock.rs
new file mode 100644
index 000000000..c2929756a
--- /dev/null
+++ b/src/cargo/util/flock.rs
@@ -0,0 +1,346 @@
+use std::fs::{self, File, OpenOptions};
+use std::io::{Read, Seek, SeekFrom, Write};
+use std::io;
+use std::path::{Display, Path, PathBuf};
+
+use termcolor::Color::Cyan;
+use fs2::{lock_contended_error, FileExt};
+#[allow(unused_imports)]
+use libc;
+
+use util::Config;
+use util::paths;
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+
+pub struct FileLock {
+    f: Option<File>,
+    path: PathBuf,
+    state: State,
+}
+
+#[derive(PartialEq, Debug)]
+enum State {
+    Unlocked,
+    Shared,
+    Exclusive,
+}
+
+impl FileLock {
+    /// Returns the underlying file handle of this lock.
+    pub fn file(&self) -> &File {
+        self.f.as_ref().unwrap()
+    }
+
+    /// Returns the underlying path that this lock points to.
+    ///
+    /// Note that special care must be taken to ensure that the path is not
+    /// referenced outside the lifetime of this lock.
+    pub fn path(&self) -> &Path {
+        assert_ne!(self.state, State::Unlocked);
+        &self.path
+    }
+
+    /// Returns the parent path containing this file
+    pub fn parent(&self) -> &Path {
+        assert_ne!(self.state, State::Unlocked);
+        self.path.parent().unwrap()
+    }
+
+    /// Removes all sibling files to this locked file.
+    ///
+    /// This can be useful if a directory is locked with a sentinel file but it
+    /// needs to be cleared out as it may be corrupt.
+    pub fn remove_siblings(&self) -> CargoResult<()> {
+        let path = self.path();
+        for entry in path.parent().unwrap().read_dir()?
{ + let entry = entry?; + if Some(&entry.file_name()[..]) == path.file_name() { + continue; + } + let kind = entry.file_type()?; + if kind.is_dir() { + paths::remove_dir_all(entry.path())?; + } else { + paths::remove_file(entry.path())?; + } + } + Ok(()) + } +} + +impl Read for FileLock { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.file().read(buf) + } +} + +impl Seek for FileLock { + fn seek(&mut self, to: SeekFrom) -> io::Result { + self.file().seek(to) + } +} + +impl Write for FileLock { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.file().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.file().flush() + } +} + +impl Drop for FileLock { + fn drop(&mut self) { + if self.state != State::Unlocked { + if let Some(f) = self.f.take() { + let _ = f.unlock(); + } + } + } +} + +/// A "filesystem" is intended to be a globally shared, hence locked, resource +/// in Cargo. +/// +/// The `Path` of a filesystem cannot be learned unless it's done in a locked +/// fashion, and otherwise functions on this structure are prepared to handle +/// concurrent invocations across multiple instances of Cargo. +#[derive(Clone, Debug)] +pub struct Filesystem { + root: PathBuf, +} + +impl Filesystem { + /// Creates a new filesystem to be rooted at the given path. + pub fn new(path: PathBuf) -> Filesystem { + Filesystem { root: path } + } + + /// Like `Path::join`, creates a new filesystem rooted at this filesystem + /// joined with the given path. + pub fn join>(&self, other: T) -> Filesystem { + Filesystem::new(self.root.join(other)) + } + + /// Like `Path::push`, pushes a new path component onto this filesystem. + pub fn push>(&mut self, other: T) { + self.root.push(other); + } + + /// Consumes this filesystem and returns the underlying `PathBuf`. + /// + /// Note that this is a relatively dangerous operation and should be used + /// with great caution!. + pub fn into_path_unlocked(self) -> PathBuf { + self.root + } + + /// Creates the directory pointed to by this filesystem. + /// + /// Handles errors where other Cargo processes are also attempting to + /// concurrently create this directory. + pub fn create_dir(&self) -> io::Result<()> { + fs::create_dir_all(&self.root) + } + + /// Returns an adaptor that can be used to print the path of this + /// filesystem. + pub fn display(&self) -> Display { + self.root.display() + } + + /// Opens exclusive access to a file, returning the locked version of a + /// file. + /// + /// This function will create a file at `path` if it doesn't already exist + /// (including intermediate directories), and then it will acquire an + /// exclusive lock on `path`. If the process must block waiting for the + /// lock, the `msg` is printed to `config`. + /// + /// The returned file can be accessed to look at the path and also has + /// read/write access to the underlying file. + pub fn open_rw
<P>
(&self, path: P, config: &Config, msg: &str) -> CargoResult<FileLock>
+    where
+        P: AsRef<Path>,
+    {
+        self.open(
+            path.as_ref(),
+            OpenOptions::new().read(true).write(true).create(true),
+            State::Exclusive,
+            config,
+            msg,
+        )
+    }
+
+    /// Opens shared access to a file, returning the locked version of a file.
+    ///
+    /// This function will fail if `path` doesn't already exist, but if it does
+    /// then it will acquire a shared lock on `path`. If the process must block
+    /// waiting for the lock, the `msg` is printed to `config`.
+    ///
+    /// The returned file can be accessed to look at the path and also has read
+    /// access to the underlying file. Any writes to the file will return an
+    /// error.
+    pub fn open_ro
<P>
(&self, path: P, config: &Config, msg: &str) -> CargoResult + where + P: AsRef, + { + self.open( + path.as_ref(), + OpenOptions::new().read(true), + State::Shared, + config, + msg, + ) + } + + fn open( + &self, + path: &Path, + opts: &OpenOptions, + state: State, + config: &Config, + msg: &str, + ) -> CargoResult { + let path = self.root.join(path); + + // If we want an exclusive lock then if we fail because of NotFound it's + // likely because an intermediate directory didn't exist, so try to + // create the directory and then continue. + let f = opts.open(&path) + .or_else(|e| { + if e.kind() == io::ErrorKind::NotFound && state == State::Exclusive { + fs::create_dir_all(path.parent().unwrap())?; + opts.open(&path) + } else { + Err(e) + } + }) + .chain_err(|| format!("failed to open: {}", path.display()))?; + match state { + State::Exclusive => { + acquire(config, msg, &path, &|| f.try_lock_exclusive(), &|| { + f.lock_exclusive() + })?; + } + State::Shared => { + acquire(config, msg, &path, &|| f.try_lock_shared(), &|| { + f.lock_shared() + })?; + } + State::Unlocked => {} + } + Ok(FileLock { + f: Some(f), + path, + state, + }) + } +} + +impl PartialEq for Filesystem { + fn eq(&self, other: &Path) -> bool { + self.root == other + } +} + +impl PartialEq for Path { + fn eq(&self, other: &Filesystem) -> bool { + self == other.root + } +} + +/// Acquires a lock on a file in a "nice" manner. +/// +/// Almost all long-running blocking actions in Cargo have a status message +/// associated with them as we're not sure how long they'll take. Whenever a +/// conflicted file lock happens, this is the case (we're not sure when the lock +/// will be released). +/// +/// This function will acquire the lock on a `path`, printing out a nice message +/// to the console if we have to wait for it. It will first attempt to use `try` +/// to acquire a lock on the crate, and in the case of contention it will emit a +/// status message based on `msg` to `config`'s shell, and then use `block` to +/// block waiting to acquire a lock. +/// +/// Returns an error if the lock could not be acquired or if any error other +/// than a contention error happens. +fn acquire( + config: &Config, + msg: &str, + path: &Path, + try: &Fn() -> io::Result<()>, + block: &Fn() -> io::Result<()>, +) -> CargoResult<()> { + // File locking on Unix is currently implemented via `flock`, which is known + // to be broken on NFS. We could in theory just ignore errors that happen on + // NFS, but apparently the failure mode [1] for `flock` on NFS is **blocking + // forever**, even if the nonblocking flag is passed! + // + // As a result, we just skip all file locks entirely on NFS mounts. That + // should avoid calling any `flock` functions at all, and it wouldn't work + // there anyway. + // + // [1]: https://github.com/rust-lang/cargo/issues/2615 + if is_on_nfs_mount(path) { + return Ok(()); + } + + match try() { + Ok(()) => return Ok(()), + + // In addition to ignoring NFS which is commonly not working we also + // just ignore locking on filesystems that look like they don't + // implement file locking. We detect that here via the return value of + // locking (e.g. inspecting errno). 
+ #[cfg(unix)] + Err(ref e) if e.raw_os_error() == Some(libc::ENOTSUP) => + { + return Ok(()) + } + + #[cfg(target_os = "linux")] + Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => + { + return Ok(()) + } + + Err(e) => { + if e.raw_os_error() != lock_contended_error().raw_os_error() { + let e = CargoError::from(e); + let cx = format!("failed to lock file: {}", path.display()); + return Err(e.context(cx).into()); + } + } + } + let msg = format!("waiting for file lock on {}", msg); + config.shell().status_with_color("Blocking", &msg, Cyan)?; + + block().chain_err(|| format!("failed to lock file: {}", path.display()))?; + return Ok(()); + + #[cfg(all(target_os = "linux", not(target_env = "musl")))] + fn is_on_nfs_mount(path: &Path) -> bool { + use std::ffi::CString; + use std::mem; + use std::os::unix::prelude::*; + + let path = match CString::new(path.as_os_str().as_bytes()) { + Ok(path) => path, + Err(_) => return false, + }; + + unsafe { + let mut buf: libc::statfs = mem::zeroed(); + let r = libc::statfs(path.as_ptr(), &mut buf); + + r == 0 && buf.f_type as u32 == libc::NFS_SUPER_MAGIC as u32 + } + } + + #[cfg(any(not(target_os = "linux"), target_env = "musl"))] + fn is_on_nfs_mount(_path: &Path) -> bool { + false + } +} diff --git a/src/cargo/util/graph.rs b/src/cargo/util/graph.rs new file mode 100644 index 000000000..fcaef4e60 --- /dev/null +++ b/src/cargo/util/graph.rs @@ -0,0 +1,136 @@ +use std::fmt; +use std::hash::Hash; +use std::collections::hash_map::{HashMap, Iter, Keys}; + +pub struct Graph { + nodes: HashMap>, +} + +enum Mark { + InProgress, + Done, +} + +pub type Nodes<'a, N, E> = Keys<'a, N, HashMap>; +pub type Edges<'a, N, E> = Iter<'a, N, E>; + +impl Graph { + pub fn new() -> Graph { + Graph { + nodes: HashMap::new(), + } + } + + pub fn add(&mut self, node: N) { + self.nodes.entry(node).or_insert_with(HashMap::new); + } + + pub fn link(&mut self, node: N, child: N) -> &mut E { + self.nodes + .entry(node) + .or_insert_with(HashMap::new) + .entry(child) + .or_insert_with(Default::default) + } + + pub fn edge(&self, from: &N, to: &N) -> Option<&E> { + self.nodes.get(from)?.get(to) + } + + pub fn edges(&self, from: &N) -> Option> { + self.nodes.get(from).map(|set| set.iter()) + } + + pub fn sort(&self) -> Option> { + let mut ret = Vec::new(); + let mut marks = HashMap::new(); + + for node in self.nodes.keys() { + self.visit(node, &mut ret, &mut marks); + } + + Some(ret) + } + + fn visit(&self, node: &N, dst: &mut Vec, marks: &mut HashMap) { + if marks.contains_key(node) { + return; + } + + marks.insert(node.clone(), Mark::InProgress); + + for child in self.nodes[node].keys() { + self.visit(child, dst, marks); + } + + dst.push(node.clone()); + marks.insert(node.clone(), Mark::Done); + } + + pub fn iter(&self) -> Nodes { + self.nodes.keys() + } + + /// Resolves one of the paths from the given dependent package up to + /// the root. + pub fn path_to_top<'a>(&'a self, mut pkg: &'a N) -> Vec<&'a N> { + // Note that this implementation isn't the most robust per se, we'll + // likely have to tweak this over time. For now though it works for what + // it's used for! + let mut result = vec![pkg]; + let first_pkg_depending_on = |pkg: &N, res: &[&N]| { + self.nodes + .iter() + .filter(|&(_node, adjacent)| adjacent.contains_key(pkg)) + // Note that we can have "cycles" introduced through dev-dependency + // edges, so make sure we don't loop infinitely. 
+ .filter(|&(node, _)| !res.contains(&node)) + .next() + .map(|p| p.0) + }; + while let Some(p) = first_pkg_depending_on(pkg, &result) { + result.push(p); + pkg = p; + } + result + } +} + +impl Default for Graph { + fn default() -> Graph { + Graph::new() + } +} + +impl fmt::Debug for Graph { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + writeln!(fmt, "Graph {{")?; + + for (n, e) in &self.nodes { + writeln!(fmt, " - {}", n)?; + + for n in e.keys() { + writeln!(fmt, " - {}", n)?; + } + } + + write!(fmt, "}}")?; + + Ok(()) + } +} + +impl PartialEq for Graph { + fn eq(&self, other: &Graph) -> bool { + self.nodes.eq(&other.nodes) + } +} +impl Eq for Graph {} + +impl Clone for Graph { + fn clone(&self) -> Graph { + Graph { + nodes: self.nodes.clone(), + } + } +} diff --git a/src/cargo/util/hex.rs b/src/cargo/util/hex.rs new file mode 100644 index 000000000..7e4dd00e9 --- /dev/null +++ b/src/cargo/util/hex.rs @@ -0,0 +1,27 @@ +#![allow(deprecated)] + +use hex; +use std::hash::{Hash, Hasher, SipHasher}; + +pub fn to_hex(num: u64) -> String { + hex::encode(&[ + (num >> 0) as u8, + (num >> 8) as u8, + (num >> 16) as u8, + (num >> 24) as u8, + (num >> 32) as u8, + (num >> 40) as u8, + (num >> 48) as u8, + (num >> 56) as u8, + ]) +} + +pub fn hash_u64(hashable: &H) -> u64 { + let mut hasher = SipHasher::new_with_keys(0, 0); + hashable.hash(&mut hasher); + hasher.finish() +} + +pub fn short_hash(hashable: &H) -> String { + to_hex(hash_u64(hashable)) +} diff --git a/src/cargo/util/important_paths.rs b/src/cargo/util/important_paths.rs new file mode 100644 index 000000000..2fb4dea59 --- /dev/null +++ b/src/cargo/util/important_paths.rs @@ -0,0 +1,32 @@ +use std::fs; +use std::path::{Path, PathBuf}; +use util::errors::CargoResult; +use util::paths; + +/// Find the root Cargo.toml +pub fn find_root_manifest_for_wd(cwd: &Path) -> CargoResult { + let file = "Cargo.toml"; + for current in paths::ancestors(cwd) { + let manifest = current.join(file); + if fs::metadata(&manifest).is_ok() { + return Ok(manifest); + } + } + + bail!( + "could not find `{}` in `{}` or any parent directory", + file, + cwd.display() + ) +} + +/// Return the path to the `file` in `pwd`, if it exists. +pub fn find_project_manifest_exact(pwd: &Path, file: &str) -> CargoResult { + let manifest = pwd.join(file); + + if manifest.exists() { + Ok(manifest) + } else { + bail!("Could not find `{}` in `{}`", file, pwd.display()) + } +} diff --git a/src/cargo/util/job.rs b/src/cargo/util/job.rs new file mode 100644 index 000000000..0df14e061 --- /dev/null +++ b/src/cargo/util/job.rs @@ -0,0 +1,271 @@ +//! Job management (mostly for windows) +//! +//! Most of the time when you're running cargo you expect Ctrl-C to actually +//! terminate the entire tree of processes in play, not just the one at the top +//! (cago). This currently works "by default" on Unix platforms because Ctrl-C +//! actually sends a signal to the *process group* rather than the parent +//! process, so everything will get torn down. On Windows, however, this does +//! not happen and Ctrl-C just kills cargo. +//! +//! To achieve the same semantics on Windows we use Job Objects to ensure that +//! all processes die at the same time. Job objects have a mode of operation +//! where when all handles to the object are closed it causes all child +//! processes associated with the object to be terminated immediately. +//! Conveniently whenever a process in the job object spawns a new process the +//! child will be associated with the job object as well. 
This means if we add
+//! ourselves to the job object we create then everything will get torn down!
+
+pub use self::imp::Setup;
+
+pub fn setup() -> Option<Setup> {
+    unsafe { imp::setup() }
+}
+
+#[cfg(unix)]
+mod imp {
+    use std::env;
+    use libc;
+
+    pub type Setup = ();
+
+    pub unsafe fn setup() -> Option<()> {
+        // There's a test case for the behavior of
+        // when-cargo-is-killed-subprocesses-are-also-killed, but that requires
+        // one cargo spawned to become its own session leader, so we do that
+        // here.
+        if env::var("__CARGO_TEST_SETSID_PLEASE_DONT_USE_ELSEWHERE").is_ok() {
+            libc::setsid();
+        }
+        Some(())
+    }
+}
+
+#[cfg(windows)]
+mod imp {
+    extern crate winapi;
+
+    use std::ffi::OsString;
+    use std::io;
+    use std::mem;
+    use std::os::windows::prelude::*;
+
+    use self::winapi::shared::basetsd::*;
+    use self::winapi::shared::minwindef::*;
+    use self::winapi::shared::minwindef::{FALSE, TRUE};
+    use self::winapi::um::handleapi::*;
+    use self::winapi::um::jobapi2::*;
+    use self::winapi::um::jobapi::*;
+    use self::winapi::um::processthreadsapi::*;
+    use self::winapi::um::psapi::*;
+    use self::winapi::um::synchapi::*;
+    use self::winapi::um::winbase::*;
+    use self::winapi::um::winnt::*;
+    use self::winapi::um::winnt::HANDLE;
+
+    pub struct Setup {
+        job: Handle,
+    }
+
+    pub struct Handle {
+        inner: HANDLE,
+    }
+
+    fn last_err() -> io::Error {
+        io::Error::last_os_error()
+    }
+
+    pub unsafe fn setup() -> Option<Setup> {
+        // Creates a new job object for us to use and then adds ourselves to it.
+        // Note that all errors are basically ignored in this function,
+        // intentionally. Job objects are "relatively new" in Windows,
+        // particularly the ability to support nested job objects. Older
+        // Windows installs don't support this ability. We probably don't want
+        // to force Cargo to abort in this situation or force others to *not*
+        // use job objects, so we instead just ignore errors and assume that
+        // we're otherwise part of someone else's job object in this case.
+
+        let job = CreateJobObjectW(0 as *mut _, 0 as *const _);
+        if job.is_null() {
+            return None;
+        }
+        let job = Handle { inner: job };
+
+        // Indicate that when all handles to the job object are gone that all
+        // processes in the object should be killed. Note that this includes our
+        // entire process tree by default because we've added ourselves and
+        // our children will reside in the job once we spawn a process.
+        let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION;
+        info = mem::zeroed();
+        info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;
+        let r = SetInformationJobObject(
+            job.inner,
+            JobObjectExtendedLimitInformation,
+            &mut info as *mut _ as LPVOID,
+            mem::size_of_val(&info) as DWORD,
+        );
+        if r == 0 {
+            return None;
+        }
+
+        // Assign our process to this job object, meaning that our children will
+        // now live or die based on our existence.
+        let me = GetCurrentProcess();
+        let r = AssignProcessToJobObject(job.inner, me);
+        if r == 0 {
+            return None;
+        }
+
+        Some(Setup { job })
+    }
+
+    impl Drop for Setup {
+        fn drop(&mut self) {
+            // This is a little subtle. By default if we are terminated then all
+            // processes in our job object are terminated as well, but we
+            // intentionally want to whitelist some processes to outlive our job
+            // object (see below).
+            //
+            // To allow for this, we manually kill processes instead of letting
+            // the job object kill them for us. We do this in a loop to handle
+            // processes spawning other processes.
+ // + // Finally once this is all done we know that the only remaining + // ones are ourselves and the whitelisted processes. The destructor + // here then configures our job object to *not* kill everything on + // close, then closes the job object. + unsafe { + while self.kill_remaining() { + info!("killed some, going for more"); + } + + let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION; + info = mem::zeroed(); + let r = SetInformationJobObject( + self.job.inner, + JobObjectExtendedLimitInformation, + &mut info as *mut _ as LPVOID, + mem::size_of_val(&info) as DWORD, + ); + if r == 0 { + info!("failed to configure job object to defaults: {}", last_err()); + } + } + } + } + + impl Setup { + unsafe fn kill_remaining(&mut self) -> bool { + #[repr(C)] + struct Jobs { + header: JOBOBJECT_BASIC_PROCESS_ID_LIST, + list: [ULONG_PTR; 1024], + } + + let mut jobs: Jobs = mem::zeroed(); + let r = QueryInformationJobObject( + self.job.inner, + JobObjectBasicProcessIdList, + &mut jobs as *mut _ as LPVOID, + mem::size_of_val(&jobs) as DWORD, + 0 as *mut _, + ); + if r == 0 { + info!("failed to query job object: {}", last_err()); + return false; + } + + let mut killed = false; + let list = &jobs.list[..jobs.header.NumberOfProcessIdsInList as usize]; + assert!(list.len() > 0); + info!("found {} remaining processes", list.len() - 1); + + let list = list.iter() + .filter(|&&id| { + // let's not kill ourselves + id as DWORD != GetCurrentProcessId() + }) + .filter_map(|&id| { + // Open the process with the necessary rights, and if this + // fails then we probably raced with the process exiting so we + // ignore the problem. + let flags = PROCESS_QUERY_INFORMATION | PROCESS_TERMINATE | SYNCHRONIZE; + let p = OpenProcess(flags, FALSE, id as DWORD); + if p.is_null() { + None + } else { + Some(Handle { inner: p }) + } + }) + .filter(|p| { + // Test if this process was actually in the job object or not. + // If it's not then we likely raced with something else + // recycling this PID, so we just skip this step. + let mut res = 0; + let r = IsProcessInJob(p.inner, self.job.inner, &mut res); + if r == 0 { + info!("failed to test is process in job: {}", last_err()); + return false; + } + res == TRUE + }); + + for p in list { + // Load the file which this process was spawned from. We then + // later use this for identification purposes. + let mut buf = [0; 1024]; + let r = GetProcessImageFileNameW(p.inner, buf.as_mut_ptr(), buf.len() as DWORD); + if r == 0 { + info!("failed to get image name: {}", last_err()); + continue; + } + let s = OsString::from_wide(&buf[..r as usize]); + info!("found remaining: {:?}", s); + + // And here's where we find the whole purpose for this + // function! Currently, our only whitelisted process is + // `mspdbsrv.exe`, and more details about that can be found + // here: + // + // https://github.com/rust-lang/rust/issues/33145 + // + // The gist of it is that all builds on one machine use the + // same `mspdbsrv.exe` instance. If we were to kill this + // instance then we could erroneously cause other builds to + // fail. + if let Some(s) = s.to_str() { + if s.contains("mspdbsrv") { + info!("\toops, this is mspdbsrv"); + continue; + } + } + + // Ok, this isn't mspdbsrv, let's kill the process. After we + // kill it we wait on it to ensure that the next time around in + // this function we're not going to see it again. 
+                let r = TerminateProcess(p.inner, 1);
+                if r == 0 {
+                    info!("\tfailed to kill subprocess: {}", last_err());
+                    info!("\tassuming subprocess is dead...");
+                } else {
+                    info!("\tterminated subprocess");
+                }
+                let r = WaitForSingleObject(p.inner, INFINITE);
+                if r != 0 {
+                    info!("failed to wait for process to die: {}", last_err());
+                    return false;
+                }
+                killed = true;
+            }
+
+            killed
+        }
+    }
+
+    impl Drop for Handle {
+        fn drop(&mut self) {
+            unsafe {
+                CloseHandle(self.inner);
+            }
+        }
+    }
+}
diff --git a/src/cargo/util/lev_distance.rs b/src/cargo/util/lev_distance.rs
new file mode 100644
index 000000000..1d1eae05a
--- /dev/null
+++ b/src/cargo/util/lev_distance.rs
@@ -0,0 +1,56 @@
+use std::cmp;
+
+pub fn lev_distance(me: &str, t: &str) -> usize {
+    if me.is_empty() {
+        return t.chars().count();
+    }
+    if t.is_empty() {
+        return me.chars().count();
+    }
+
+    let mut dcol = (0..t.len() + 1).collect::<Vec<_>>();
+    let mut t_last = 0;
+
+    for (i, sc) in me.chars().enumerate() {
+        let mut current = i;
+        dcol[0] = current + 1;
+
+        for (j, tc) in t.chars().enumerate() {
+            let next = dcol[j + 1];
+
+            if sc == tc {
+                dcol[j + 1] = current;
+            } else {
+                dcol[j + 1] = cmp::min(current, next);
+                dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1;
+            }
+
+            current = next;
+            t_last = j;
+        }
+    }
+
+    dcol[t_last + 1]
+}
+
+#[test]
+fn test_lev_distance() {
+    use std::char::{from_u32, MAX};
+    // Test bytelength agnosticity
+    for c in (0u32..MAX as u32)
+        .filter_map(|i| from_u32(i))
+        .map(|i| i.to_string())
+    {
+        assert_eq!(lev_distance(&c, &c), 0);
+    }
+
+    let a = "\nMäry häd ä little lämb\n\nLittle lämb\n";
+    let b = "\nMary häd ä little lämb\n\nLittle lämb\n";
+    let c = "Mary häd ä little lämb\n\nLittle lämb\n";
+    assert_eq!(lev_distance(a, b), 1);
+    assert_eq!(lev_distance(b, a), 1);
+    assert_eq!(lev_distance(a, c), 2);
+    assert_eq!(lev_distance(c, a), 2);
+    assert_eq!(lev_distance(b, c), 1);
+    assert_eq!(lev_distance(c, b), 1);
+}
diff --git a/src/cargo/util/machine_message.rs b/src/cargo/util/machine_message.rs
new file mode 100644
index 000000000..3104d4b60
--- /dev/null
+++ b/src/cargo/util/machine_message.rs
@@ -0,0 +1,70 @@
+use serde::ser;
+use serde_json::{self, Value};
+
+use core::{PackageId, Target};
+
+pub trait Message: ser::Serialize {
+    fn reason(&self) -> &str;
+}
+
+pub fn emit<T: Message>(t: &T) {
+    let mut json: Value = serde_json::to_value(t).unwrap();
+    json["reason"] = json!(t.reason());
+    println!("{}", json);
+}
+
+#[derive(Serialize)]
+pub struct FromCompiler<'a> {
+    pub package_id: &'a PackageId,
+    pub target: &'a Target,
+    pub message: serde_json::Value,
+}
+
+impl<'a> Message for FromCompiler<'a> {
+    fn reason(&self) -> &str {
+        "compiler-message"
+    }
+}
+
+#[derive(Serialize)]
+pub struct Artifact<'a> {
+    pub package_id: &'a PackageId,
+    pub target: &'a Target,
+    pub profile: ArtifactProfile,
+    pub features: Vec<String>,
+    pub filenames: Vec<String>,
+    pub fresh: bool,
+}
+
+impl<'a> Message for Artifact<'a> {
+    fn reason(&self) -> &str {
+        "compiler-artifact"
+    }
+}
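+
+// For reference, `emit` prints each message as a single JSON line with the
+// `reason` field injected; an `Artifact`, for example, comes out shaped
+// roughly like this (values are illustrative only):
+//
+//     {"reason":"compiler-artifact","package_id":"...","target":{...},
+//      "profile":{...},"features":[],"filenames":["..."],"fresh":false}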
+
+/// This is different from the regular `Profile` to maintain backwards
+/// compatibility (in particular, `test` is no longer in `Profile`, but we
+/// still want it to be included here).
+#[derive(Serialize)]
+pub struct ArtifactProfile {
+    pub opt_level: &'static str,
+    pub debuginfo: Option<u32>,
+    pub debug_assertions: bool,
+    pub overflow_checks: bool,
+    pub test: bool,
+}
+
+#[derive(Serialize)]
+pub struct BuildScript<'a> {
+    pub package_id: &'a PackageId,
+    pub linked_libs: &'a [String],
+    pub linked_paths: &'a [String],
+    pub cfgs: &'a [String],
+    pub env: &'a [(String, String)],
+}
+
+impl<'a> Message for BuildScript<'a> {
+    fn reason(&self) -> &str {
+        "build-script-executed"
+    }
+}
diff --git a/src/cargo/util/mod.rs b/src/cargo/util/mod.rs
new file mode 100644
index 000000000..2d9505d9a
--- /dev/null
+++ b/src/cargo/util/mod.rs
@@ -0,0 +1,44 @@
+pub use self::cfg::{Cfg, CfgExpr};
+pub use self::config::{homedir, Config, ConfigValue};
+pub use self::dependency_queue::{DependencyQueue, Dirty, Fresh, Freshness};
+pub use self::errors::{CargoError, CargoResult, CargoResultExt, CliResult, Test};
+pub use self::errors::{CargoTestError, CliError, ProcessError};
+pub use self::errors::{internal, process_error};
+pub use self::flock::{FileLock, Filesystem};
+pub use self::graph::Graph;
+pub use self::hex::{short_hash, to_hex, hash_u64};
+pub use self::lev_distance::lev_distance;
+pub use self::paths::{dylib_path, join_paths, bytes2path, path2bytes};
+pub use self::paths::{dylib_path_envvar, normalize_path, without_prefix};
+pub use self::process_builder::{process, ProcessBuilder};
+pub use self::rustc::Rustc;
+pub use self::sha256::Sha256;
+pub use self::to_semver::ToSemver;
+pub use self::to_url::ToUrl;
+pub use self::vcs::{FossilRepo, GitRepo, HgRepo, PijulRepo};
+pub use self::read2::read2;
+pub use self::progress::Progress;
+
+pub mod config;
+pub mod errors;
+pub mod graph;
+pub mod hex;
+pub mod important_paths;
+pub mod job;
+pub mod lev_distance;
+pub mod machine_message;
+pub mod network;
+pub mod paths;
+pub mod process_builder;
+pub mod profile;
+pub mod to_semver;
+pub mod to_url;
+pub mod toml;
+mod cfg;
+mod dependency_queue;
+mod rustc;
+mod sha256;
+mod vcs;
+mod flock;
+mod read2;
+mod progress;
diff --git a/src/cargo/util/network.rs b/src/cargo/util/network.rs
new file mode 100644
index 000000000..e789a929d
--- /dev/null
+++ b/src/cargo/util/network.rs
@@ -0,0 +1,106 @@
+use curl;
+use git2;
+
+use failure::Error;
+
+use util::Config;
+use util::errors::{CargoResult, HttpNot200};
+
+fn maybe_spurious(err: &Error) -> bool {
+    for e in err.causes() {
+        if let Some(git_err) = e.downcast_ref::<git2::Error>() {
+            match git_err.class() {
+                git2::ErrorClass::Net | git2::ErrorClass::Os => return true,
+                _ => (),
+            }
+        }
+        if let Some(curl_err) = e.downcast_ref::<curl::Error>() {
+            if curl_err.is_couldnt_connect() || curl_err.is_couldnt_resolve_proxy()
+                || curl_err.is_couldnt_resolve_host()
+                || curl_err.is_operation_timedout() || curl_err.is_recv_error()
+            {
+                return true;
+            }
+        }
+        if let Some(not_200) = e.downcast_ref::<HttpNot200>() {
+            if 500 <= not_200.code && not_200.code < 600 {
+                return true;
+            }
+        }
+    }
+    false
+}
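+
+// For example (a sketch of the checks above): an `HttpNot200` whose code is
+// in the 5xx range counts as spurious, while a 4xx one does not:
+//
+//     maybe_spurious(&HttpNot200 { code: 503, url }.into()) // -> true
+//     maybe_spurious(&HttpNot200 { code: 404, url }.into()) // -> false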
+
+/// Wrapper method for network call retry logic.
+///
+/// Retry counts are provided by the `net.retry` config value. The config's
+/// shell outputs a warning on each retry.
+///
+/// The closure must return a `CargoResult`.
+///
+/// # Examples
+///
+/// ```ignore
+/// use util::network;
+/// let cargo_result = network::with_retry(&config, || something.download());
+/// ```
+pub fn with_retry<T, F>(config: &Config, mut callback: F) -> CargoResult<T>
+where
+    F: FnMut() -> CargoResult<T>,
+{
+    let mut remaining = config.net_retry()?;
+    loop {
+        match callback() {
+            Ok(ret) => return Ok(ret),
+            Err(ref e) if maybe_spurious(e) && remaining > 0 => {
+                let msg = format!(
+                    "spurious network error ({} tries \
+                     remaining): {}",
+                    remaining, e
+                );
+                config.shell().warn(msg)?;
+                remaining -= 1;
+            }
+            // TODO: impl From so this can be just `Err(e) => return Err(e)`
+            Err(e) => return Err(e.into()),
+        }
+    }
+}
+#[test]
+fn with_retry_repeats_the_call_then_works() {
+    // Error HTTP codes (5xx) are considered maybe_spurious and will prompt retry
+    let error1 = HttpNot200 {
+        code: 501,
+        url: "Uri".to_string(),
+    }.into();
+    let error2 = HttpNot200 {
+        code: 502,
+        url: "Uri".to_string(),
+    }.into();
+    let mut results: Vec<CargoResult<()>> = vec![Ok(()), Err(error1), Err(error2)];
+    let config = Config::default().unwrap();
+    let result = with_retry(&config, || results.pop().unwrap());
+    assert_eq!(result.unwrap(), ())
+}
+
+#[test]
+fn with_retry_finds_nested_spurious_errors() {
+    use util::CargoError;
+
+    // Error HTTP codes (5xx) are considered maybe_spurious and will prompt retry
+    // String error messages are not considered spurious
+    let error1 = CargoError::from(HttpNot200 {
+        code: 501,
+        url: "Uri".to_string(),
+    });
+    let error1 = CargoError::from(error1.context("A non-spurious wrapping err"));
+    let error2 = CargoError::from(HttpNot200 {
+        code: 502,
+        url: "Uri".to_string(),
+    });
+    let error2 = CargoError::from(error2.context("A second chained error"));
+    let mut results: Vec<CargoResult<()>> = vec![Ok(()), Err(error1), Err(error2)];
+    let config = Config::default().unwrap();
+    let result = with_retry(&config, || results.pop().unwrap());
+    assert_eq!(result.unwrap(), ())
+}
diff --git a/src/cargo/util/paths.rs b/src/cargo/util/paths.rs
new file mode 100644
index 000000000..de87a6a3c
--- /dev/null
+++ b/src/cargo/util/paths.rs
@@ -0,0 +1,292 @@
+use std::env;
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, File, OpenOptions};
+use std::io;
+use std::io::prelude::*;
+use std::path::{Component, Path, PathBuf};
+use std::iter;
+
+use filetime::FileTime;
+
+use util::errors::{CargoError, CargoResult, CargoResultExt, Internal};
+
+pub fn join_paths<T: AsRef<OsStr>>(paths: &[T], env: &str) -> CargoResult<OsString> {
+    let err = match env::join_paths(paths.iter()) {
+        Ok(paths) => return Ok(paths),
+        Err(e) => e,
+    };
+    let paths = paths.iter().map(Path::new).collect::<Vec<_>>();
+    let err = CargoError::from(err);
+    let explain = Internal::new(format_err!("failed to join path array: {:?}", paths));
+    let err = CargoError::from(err.context(explain));
+    let more_explain = format!(
+        "failed to join search paths together\n\
+         Does ${} have an unterminated quote character?",
+        env
+    );
+    Err(err.context(more_explain).into())
+}
+
+pub fn dylib_path_envvar() -> &'static str {
+    if cfg!(windows) {
+        "PATH"
+    } else if cfg!(target_os = "macos") {
+        "DYLD_LIBRARY_PATH"
+    } else {
+        "LD_LIBRARY_PATH"
+    }
+}
+
+pub fn dylib_path() -> Vec<PathBuf> {
+    match env::var_os(dylib_path_envvar()) {
+        Some(var) => env::split_paths(&var).collect(),
+        None => Vec::new(),
+    }
+}
+
+pub fn normalize_path(path: &Path) -> PathBuf {
+    let mut components = path.components().peekable();
+    let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() {
+        components.next();
+        PathBuf::from(c.as_os_str())
+    } else {
+        PathBuf::new()
+    };
+
+    for component in
components { + match component { + Component::Prefix(..) => unreachable!(), + Component::RootDir => { + ret.push(component.as_os_str()); + } + Component::CurDir => {} + Component::ParentDir => { + ret.pop(); + } + Component::Normal(c) => { + ret.push(c); + } + } + } + ret +} + +pub fn without_prefix<'a>(long_path: &'a Path, prefix: &'a Path) -> Option<&'a Path> { + let mut a = long_path.components(); + let mut b = prefix.components(); + loop { + match b.next() { + Some(y) => match a.next() { + Some(x) if x == y => continue, + _ => return None, + }, + None => return Some(a.as_path()), + } + } +} + +pub fn resolve_executable(exec: &Path) -> CargoResult { + if exec.components().count() == 1 { + let paths = env::var_os("PATH").ok_or(format_err!("no PATH"))?; + let candidates = env::split_paths(&paths).flat_map(|path| { + let candidate = PathBuf::from(path).join(&exec); + let with_exe = if env::consts::EXE_EXTENSION == "" { + None + } else { + Some(candidate.with_extension(env::consts::EXE_EXTENSION)) + }; + iter::once(candidate).chain(with_exe) + }); + for candidate in candidates { + if candidate.is_file() { + // PATH may have a component like "." in it, so we still need to + // canonicalize. + return Ok(candidate.canonicalize()?); + } + } + + bail!("no executable for `{}` found in PATH", exec.display()) + } else { + Ok(exec.canonicalize()?) + } +} + +pub fn read(path: &Path) -> CargoResult { + match String::from_utf8(read_bytes(path)?) { + Ok(s) => Ok(s), + Err(_) => bail!("path at `{}` was not valid utf-8", path.display()), + } +} + +pub fn read_bytes(path: &Path) -> CargoResult> { + let res = (|| -> CargoResult<_> { + let mut ret = Vec::new(); + let mut f = File::open(path)?; + if let Ok(m) = f.metadata() { + ret.reserve(m.len() as usize + 1); + } + f.read_to_end(&mut ret)?; + Ok(ret) + })() + .chain_err(|| format!("failed to read `{}`", path.display()))?; + Ok(res) +} + +pub fn write(path: &Path, contents: &[u8]) -> CargoResult<()> { + (|| -> CargoResult<()> { + let mut f = File::create(path)?; + f.write_all(contents)?; + Ok(()) + })() + .chain_err(|| format!("failed to write `{}`", path.display()))?; + Ok(()) +} + +pub fn append(path: &Path, contents: &[u8]) -> CargoResult<()> { + (|| -> CargoResult<()> { + let mut f = OpenOptions::new() + .write(true) + .append(true) + .create(true) + .open(path)?; + + f.write_all(contents)?; + Ok(()) + })() + .chain_err(|| format!("failed to write `{}`", path.display()))?; + Ok(()) +} + +pub fn mtime(path: &Path) -> CargoResult { + let meta = fs::metadata(path).chain_err(|| format!("failed to stat `{}`", path.display()))?; + Ok(FileTime::from_last_modification_time(&meta)) +} + +#[cfg(unix)] +pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> { + use std::os::unix::prelude::*; + Ok(path.as_os_str().as_bytes()) +} +#[cfg(windows)] +pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> { + match path.as_os_str().to_str() { + Some(s) => Ok(s.as_bytes()), + None => Err(format_err!("invalid non-unicode path: {}", path.display())), + } +} + +#[cfg(unix)] +pub fn bytes2path(bytes: &[u8]) -> CargoResult { + use std::os::unix::prelude::*; + use std::ffi::OsStr; + Ok(PathBuf::from(OsStr::from_bytes(bytes))) +} +#[cfg(windows)] +pub fn bytes2path(bytes: &[u8]) -> CargoResult { + use std::str; + match str::from_utf8(bytes) { + Ok(s) => Ok(PathBuf::from(s)), + Err(..) 
=> Err(format_err!("invalid non-unicode path")), + } +} + +pub fn ancestors(path: &Path) -> PathAncestors { + PathAncestors::new(path) +} + +pub struct PathAncestors<'a> { + current: Option<&'a Path>, + stop_at: Option, +} + +impl<'a> PathAncestors<'a> { + fn new(path: &Path) -> PathAncestors { + PathAncestors { + current: Some(path), + //HACK: avoid reading `~/.cargo/config` when testing Cargo itself. + stop_at: env::var("__CARGO_TEST_ROOT").ok().map(PathBuf::from), + } + } +} + +impl<'a> Iterator for PathAncestors<'a> { + type Item = &'a Path; + + fn next(&mut self) -> Option<&'a Path> { + if let Some(path) = self.current { + self.current = path.parent(); + + if let Some(ref stop_at) = self.stop_at { + if path == stop_at { + self.current = None; + } + } + + Some(path) + } else { + None + } + } +} + +pub fn remove_dir_all>(p: P) -> CargoResult<()> { + _remove_dir_all(p.as_ref()) +} + +fn _remove_dir_all(p: &Path) -> CargoResult<()> { + if p.symlink_metadata()?.file_type().is_symlink() { + return remove_file(p); + } + let entries = p.read_dir() + .chain_err(|| format!("failed to read directory `{}`", p.display()))?; + for entry in entries { + let entry = entry?; + let path = entry.path(); + if entry.file_type()?.is_dir() { + remove_dir_all(&path)?; + } else { + remove_file(&path)?; + } + } + remove_dir(&p) +} + +pub fn remove_dir>(p: P) -> CargoResult<()> { + _remove_dir(p.as_ref()) +} + +fn _remove_dir(p: &Path) -> CargoResult<()> { + fs::remove_dir(p).chain_err(|| format!("failed to remove directory `{}`", p.display()))?; + Ok(()) +} + +pub fn remove_file>(p: P) -> CargoResult<()> { + _remove_file(p.as_ref()) +} + +fn _remove_file(p: &Path) -> CargoResult<()> { + let mut err = match fs::remove_file(p) { + Ok(()) => return Ok(()), + Err(e) => e, + }; + + if err.kind() == io::ErrorKind::PermissionDenied && set_not_readonly(p).unwrap_or(false) { + match fs::remove_file(p) { + Ok(()) => return Ok(()), + Err(e) => err = e, + } + } + + Err(err).chain_err(|| format!("failed to remove file `{}`", p.display()))?; + Ok(()) +} + +fn set_not_readonly(p: &Path) -> io::Result { + let mut perms = p.metadata()?.permissions(); + if !perms.readonly() { + return Ok(false); + } + perms.set_readonly(false); + fs::set_permissions(p, perms)?; + Ok(true) +} diff --git a/src/cargo/util/process_builder.rs b/src/cargo/util/process_builder.rs new file mode 100644 index 000000000..bf70044b8 --- /dev/null +++ b/src/cargo/util/process_builder.rs @@ -0,0 +1,345 @@ +use std::collections::HashMap; +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fmt; +use std::path::Path; +use std::process::{Command, Output, Stdio}; + +use jobserver::Client; +use shell_escape::escape; + +use util::{process_error, CargoError, CargoResult, CargoResultExt, read2}; + +/// A builder object for an external process, similar to `std::process::Command`. +#[derive(Clone, Debug)] +pub struct ProcessBuilder { + /// The program to execute. + program: OsString, + /// A list of arguments to pass to the program. + args: Vec, + /// Any environment variables that should be set for the program. + env: HashMap>, + /// Which directory to run the program from. + cwd: Option, + /// The `make` jobserver. See the [jobserver crate][jobserver_docs] for + /// more information. 
+    ///
+    /// [jobserver_docs]: https://docs.rs/jobserver/0.1.6/jobserver/
+    jobserver: Option<Client>,
+}
+
+impl fmt::Display for ProcessBuilder {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "`{}", self.program.to_string_lossy())?;
+
+        for arg in &self.args {
+            write!(f, " {}", escape(arg.to_string_lossy()))?;
+        }
+
+        write!(f, "`")
+    }
+}
+
+impl ProcessBuilder {
+    /// (chainable) Set the executable for the process.
+    pub fn program<T: AsRef<OsStr>>(&mut self, program: T) -> &mut ProcessBuilder {
+        self.program = program.as_ref().to_os_string();
+        self
+    }
+
+    /// (chainable) Add an arg to the args list.
+    pub fn arg<T: AsRef<OsStr>>(&mut self, arg: T) -> &mut ProcessBuilder {
+        self.args.push(arg.as_ref().to_os_string());
+        self
+    }
+
+    /// (chainable) Add many args to the args list.
+    pub fn args<T: AsRef<OsStr>>(&mut self, arguments: &[T]) -> &mut ProcessBuilder {
+        self.args
+            .extend(arguments.iter().map(|t| t.as_ref().to_os_string()));
+        self
+    }
+
+    /// (chainable) Replace the args list with a new one.
+    pub fn args_replace<T: AsRef<OsStr>>(&mut self, arguments: &[T]) -> &mut ProcessBuilder {
+        self.args = arguments
+            .iter()
+            .map(|t| t.as_ref().to_os_string())
+            .collect();
+        self
+    }
+
+    /// (chainable) Set the current working directory of the process.
+    pub fn cwd<T: AsRef<OsStr>>(&mut self, path: T) -> &mut ProcessBuilder {
+        self.cwd = Some(path.as_ref().to_os_string());
+        self
+    }
+
+    /// (chainable) Set an environment variable for the process.
+    pub fn env<T: AsRef<OsStr>>(&mut self, key: &str, val: T) -> &mut ProcessBuilder {
+        self.env
+            .insert(key.to_string(), Some(val.as_ref().to_os_string()));
+        self
+    }
+
+    /// (chainable) Unset an environment variable for the process.
+    pub fn env_remove(&mut self, key: &str) -> &mut ProcessBuilder {
+        self.env.insert(key.to_string(), None);
+        self
+    }
+
+    /// Get the executable name.
+    pub fn get_program(&self) -> &OsString {
+        &self.program
+    }
+
+    /// Get the program arguments.
+    pub fn get_args(&self) -> &[OsString] {
+        &self.args
+    }
+
+    /// Get the current working directory for the process.
+    pub fn get_cwd(&self) -> Option<&Path> {
+        self.cwd.as_ref().map(Path::new)
+    }
+
+    /// Get an environment variable as the process will see it (will inherit from environment
+    /// unless explicitly unset).
+    pub fn get_env(&self, var: &str) -> Option<OsString> {
+        self.env
+            .get(var)
+            .cloned()
+            .or_else(|| Some(env::var_os(var)))
+            .and_then(|s| s)
+    }
+
+    /// Get all environment variables explicitly set or unset for the process (not inherited
+    /// vars).
+    pub fn get_envs(&self) -> &HashMap<String, Option<OsString>> {
+        &self.env
+    }
+
+    /// Set the `make` jobserver. See the [jobserver crate][jobserver_docs] for
+    /// more information.
+    ///
+    /// [jobserver_docs]: https://docs.rs/jobserver/0.1.6/jobserver/
+    pub fn inherit_jobserver(&mut self, jobserver: &Client) -> &mut Self {
+        self.jobserver = Some(jobserver.clone());
+        self
+    }
+
+    /// Run the process, waiting for completion, and mapping non-success exit codes to an error.
+    pub fn exec(&self) -> CargoResult<()> {
+        let mut command = self.build_command();
+        let exit = command.status().chain_err(|| {
+            process_error(
+                &format!("could not execute process `{}`", self.debug_string()),
+                None,
+                None,
+            )
+        })?;
+
+        if exit.success() {
+            Ok(())
+        } else {
+            Err(process_error(
+                &format!(
+                    "process didn't exit successfully: `{}`",
+                    self.debug_string()
+                ),
+                Some(&exit),
+                None,
+            ).into())
+        }
+    }
+
+    /// On unix, executes the process using the unix syscall `execvp`, which will block this
+    /// process, and will only return if there is an error. On windows this is a synonym for
+    /// `exec`.
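+    ///
+    /// # Examples
+    ///
+    /// ```ignore
+    /// // Hand this process over to `rustc -vV` (illustrative).
+    /// use util::process;
+    /// process("rustc").arg("-vV").exec_replace()?;
+    /// ```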
+ #[cfg(unix)] + pub fn exec_replace(&self) -> CargoResult<()> { + use std::os::unix::process::CommandExt; + + let mut command = self.build_command(); + let error = command.exec(); + Err(CargoError::from(error) + .context(process_error( + &format!("could not execute process `{}`", self.debug_string()), + None, + None, + )) + .into()) + } + + /// On unix, executes the process using the unix syscall `execvp`, which will block this + /// process, and will only return if there is an error. On windows this is a synonym for + /// `exec`. + #[cfg(windows)] + pub fn exec_replace(&self) -> CargoResult<()> { + self.exec() + } + + /// Execute the process, returning the stdio output, or an error if non-zero exit status. + pub fn exec_with_output(&self) -> CargoResult { + let mut command = self.build_command(); + + let output = command.output().chain_err(|| { + process_error( + &format!("could not execute process `{}`", self.debug_string()), + None, + None, + ) + })?; + + if output.status.success() { + Ok(output) + } else { + Err(process_error( + &format!( + "process didn't exit successfully: `{}`", + self.debug_string() + ), + Some(&output.status), + Some(&output), + ).into()) + } + } + + /// Execute a command, passing each line of stdout and stderr to the supplied callbacks, which + /// can mutate the string data. + /// + /// If any invocations of these function return an error, it will be propagated. + /// + /// Optionally, output can be passed to errors using `print_output` + pub fn exec_with_streaming( + &self, + on_stdout_line: &mut FnMut(&str) -> CargoResult<()>, + on_stderr_line: &mut FnMut(&str) -> CargoResult<()>, + print_output: bool, + ) -> CargoResult { + let mut stdout = Vec::new(); + let mut stderr = Vec::new(); + + let mut cmd = self.build_command(); + cmd.stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()); + + let mut callback_error = None; + let status = (|| { + let mut child = cmd.spawn()?; + let out = child.stdout.take().unwrap(); + let err = child.stderr.take().unwrap(); + read2(out, err, &mut |is_out, data, eof| { + let idx = if eof { + data.len() + } else { + match data.iter().rposition(|b| *b == b'\n') { + Some(i) => i + 1, + None => return, + } + }; + let data = data.drain(..idx); + let dst = if is_out { &mut stdout } else { &mut stderr }; + let start = dst.len(); + dst.extend(data); + for line in String::from_utf8_lossy(&dst[start..]).lines() { + if callback_error.is_some() { + break; + } + let callback_result = if is_out { + on_stdout_line(line) + } else { + on_stderr_line(line) + }; + if let Err(e) = callback_result { + callback_error = Some(e); + } + } + })?; + child.wait() + })() + .chain_err(|| { + process_error( + &format!("could not execute process `{}`", self.debug_string()), + None, + None, + ) + })?; + let output = Output { + stdout, + stderr, + status, + }; + + { + let to_print = if print_output { Some(&output) } else { None }; + if !output.status.success() { + return Err(process_error( + &format!( + "process didn't exit successfully: `{}`", + self.debug_string() + ), + Some(&output.status), + to_print, + ).into()); + } else if let Some(e) = callback_error { + let cx = process_error( + &format!("failed to parse process output: `{}`", self.debug_string()), + Some(&output.status), + to_print, + ); + return Err(CargoError::from(e).context(cx).into()); + } + } + + Ok(output) + } + + /// Converts ProcessBuilder into a `std::process::Command`, and handles the jobserver if + /// present. 
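+    ///
+    /// # Examples
+    ///
+    /// ```ignore
+    /// // Drop down to `std::process::Command` and spawn manually
+    /// // (illustrative).
+    /// let mut cmd = process("rustc").arg("-vV").build_command();
+    /// let output = cmd.output()?;
+    /// ```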
+ pub fn build_command(&self) -> Command { + let mut command = Command::new(&self.program); + if let Some(cwd) = self.get_cwd() { + command.current_dir(cwd); + } + for arg in &self.args { + command.arg(arg); + } + for (k, v) in &self.env { + match *v { + Some(ref v) => { + command.env(k, v); + } + None => { + command.env_remove(k); + } + } + } + if let Some(ref c) = self.jobserver { + c.configure(&mut command); + } + command + } + + /// Get the command line for the process as a string. + fn debug_string(&self) -> String { + let mut program = format!("{}", self.program.to_string_lossy()); + for arg in &self.args { + program.push(' '); + program.push_str(&format!("{}", arg.to_string_lossy())); + } + program + } +} + +/// A helper function to create a `ProcessBuilder`. +pub fn process>(cmd: T) -> ProcessBuilder { + ProcessBuilder { + program: cmd.as_ref().to_os_string(), + args: Vec::new(), + cwd: None, + env: HashMap::new(), + jobserver: None, + } +} diff --git a/src/cargo/util/profile.rs b/src/cargo/util/profile.rs new file mode 100644 index 000000000..ce49eee26 --- /dev/null +++ b/src/cargo/util/profile.rs @@ -0,0 +1,89 @@ +use std::env; +use std::fmt; +use std::mem; +use std::time; +use std::iter::repeat; +use std::cell::RefCell; +use std::io::{stdout, StdoutLock, Write}; + +thread_local!(static PROFILE_STACK: RefCell> = RefCell::new(Vec::new())); +thread_local!(static MESSAGES: RefCell> = RefCell::new(Vec::new())); + +type Message = (usize, u64, String); + +pub struct Profiler { + desc: String, +} + +fn enabled_level() -> Option { + env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok()) +} + +pub fn start(desc: T) -> Profiler { + if enabled_level().is_none() { + return Profiler { + desc: String::new(), + }; + } + + PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now())); + + Profiler { + desc: desc.to_string(), + } +} + +impl Drop for Profiler { + fn drop(&mut self) { + let enabled = match enabled_level() { + Some(i) => i, + None => return, + }; + + let (start, stack_len) = PROFILE_STACK.with(|stack| { + let mut stack = stack.borrow_mut(); + let start = stack.pop().unwrap(); + (start, stack.len()) + }); + let duration = start.elapsed(); + let duration_ms = + duration.as_secs() * 1000 + u64::from(duration.subsec_nanos() / 1_000_000); + + let msg = ( + stack_len, + duration_ms, + mem::replace(&mut self.desc, String::new()), + ); + MESSAGES.with(|msgs| msgs.borrow_mut().push(msg)); + + if stack_len == 0 { + fn print(lvl: usize, msgs: &[Message], enabled: usize, stdout: &mut StdoutLock) { + if lvl > enabled { + return; + } + let mut last = 0; + for (i, &(l, time, ref msg)) in msgs.iter().enumerate() { + if l != lvl { + continue; + } + writeln!( + stdout, + "{} {:6}ms - {}", + repeat(" ").take(lvl + 1).collect::(), + time, + msg + ).expect("printing profiling info to stdout"); + + print(lvl + 1, &msgs[last..i], enabled, stdout); + last = i; + } + } + let stdout = stdout(); + MESSAGES.with(|msgs| { + let mut msgs = msgs.borrow_mut(); + print(0, &msgs, enabled, &mut stdout.lock()); + msgs.clear(); + }); + } + } +} diff --git a/src/cargo/util/progress.rs b/src/cargo/util/progress.rs new file mode 100644 index 000000000..0a5af5b5e --- /dev/null +++ b/src/cargo/util/progress.rs @@ -0,0 +1,136 @@ +use std::cmp; +use std::env; +use std::iter; +use std::time::{Duration, Instant}; + +use core::shell::Verbosity; +use util::{CargoResult, Config}; + +pub struct Progress<'cfg> { + state: Option>, +} + +struct State<'cfg> { + config: &'cfg Config, + width: usize, + first: bool, + 
last_update: Instant, + name: String, + done: bool, +} + +impl<'cfg> Progress<'cfg> { + pub fn new(name: &str, cfg: &'cfg Config) -> Progress<'cfg> { + // report no progress when -q (for quiet) or TERM=dumb are set + let dumb = match env::var("TERM") { + Ok(term) => term == "dumb", + Err(_) => false, + }; + if cfg.shell().verbosity() == Verbosity::Quiet || dumb { + return Progress { state: None }; + } + + Progress { + state: cfg.shell().err_width().map(|n| State { + config: cfg, + width: cmp::min(n, 80), + first: true, + last_update: Instant::now(), + name: name.to_string(), + done: false, + }), + } + } + + pub fn tick(&mut self, cur: usize, max: usize) -> CargoResult<()> { + match self.state { + Some(ref mut s) => s.tick(cur, max), + None => Ok(()), + } + } +} + +impl<'cfg> State<'cfg> { + fn tick(&mut self, cur: usize, max: usize) -> CargoResult<()> { + if self.done { + return Ok(()); + } + + // Don't update too often as it can cause excessive performance loss + // just putting stuff onto the terminal. We also want to avoid + // flickering by not drawing anything that goes away too quickly. As a + // result we've got two branches here: + // + // 1. If we haven't drawn anything, we wait for a period of time to + // actually start drawing to the console. This ensures that + // short-lived operations don't flicker on the console. Currently + // there's a 500ms delay to when we first draw something. + // 2. If we've drawn something, then we rate limit ourselves to only + // draw to the console every so often. Currently there's a 100ms + // delay between updates. + if self.first { + let delay = Duration::from_millis(500); + if self.last_update.elapsed() < delay { + return Ok(()); + } + self.first = false; + } else { + let interval = Duration::from_millis(100); + if self.last_update.elapsed() < interval { + return Ok(()); + } + } + self.last_update = Instant::now(); + + // Render the percentage at the far right and then figure how long the + // progress bar is + let pct = (cur as f64) / (max as f64); + let pct = if !pct.is_finite() { 0.0 } else { pct }; + let stats = format!(" {:6.02}%", pct * 100.0); + let extra_len = stats.len() + 2 /* [ and ] */ + 15 /* status header */; + let display_width = match self.width.checked_sub(extra_len) { + Some(n) => n, + None => return Ok(()), + }; + let mut string = String::from("["); + let hashes = display_width as f64 * pct; + let hashes = hashes as usize; + + // Draw the `===>` + if hashes > 0 { + for _ in 0..hashes - 1 { + string.push_str("="); + } + if cur == max { + self.done = true; + string.push_str("="); + } else { + string.push_str(">"); + } + } + + // Draw the empty space we have left to do + for _ in 0..(display_width - hashes) { + string.push_str(" "); + } + string.push_str("]"); + string.push_str(&stats); + + // Write out a pretty header, then the progress bar itself, and then + // return back to the beginning of the line for the next print. 
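+        //
+        // For example, a quarter-done bar with `name = "Building"` renders
+        // roughly as (spacing illustrative):
+        //
+        //     Building [=========>                              ]  25.00%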
+ self.config.shell().status_header(&self.name)?; + write!(self.config.shell().err(), "{}\r", string)?; + Ok(()) + } +} + +fn clear(width: usize, config: &Config) { + let blank = iter::repeat(" ").take(width).collect::(); + drop(write!(config.shell().err(), "{}\r", blank)); +} + +impl<'cfg> Drop for State<'cfg> { + fn drop(&mut self) { + clear(self.width, self.config); + } +} diff --git a/src/cargo/util/read2.rs b/src/cargo/util/read2.rs new file mode 100644 index 000000000..13a50a724 --- /dev/null +++ b/src/cargo/util/read2.rs @@ -0,0 +1,185 @@ +pub use self::imp::read2; + +#[cfg(unix)] +mod imp { + use std::io::prelude::*; + use std::io; + use std::mem; + use std::os::unix::prelude::*; + use std::process::{ChildStderr, ChildStdout}; + use libc; + + pub fn read2( + mut out_pipe: ChildStdout, + mut err_pipe: ChildStderr, + data: &mut FnMut(bool, &mut Vec, bool), + ) -> io::Result<()> { + unsafe { + libc::fcntl(out_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + libc::fcntl(err_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + } + + let mut out_done = false; + let mut err_done = false; + let mut out = Vec::new(); + let mut err = Vec::new(); + + let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() }; + fds[0].fd = out_pipe.as_raw_fd(); + fds[0].events = libc::POLLIN; + fds[1].fd = err_pipe.as_raw_fd(); + fds[1].events = libc::POLLIN; + let mut nfds = 2; + let mut errfd = 1; + + while nfds > 0 { + // wait for either pipe to become readable using `select` + let r = unsafe { libc::poll(fds.as_mut_ptr(), nfds, -1) }; + if r == -1 { + let err = io::Error::last_os_error(); + if err.kind() == io::ErrorKind::Interrupted { + continue; + } + return Err(err); + } + + // Read as much as we can from each pipe, ignoring EWOULDBLOCK or + // EAGAIN. If we hit EOF, then this will happen because the underlying + // reader will return Ok(0), in which case we'll see `Ok` ourselves. In + // this case we flip the other fd back into blocking mode and read + // whatever's leftover on that file descriptor. + let handle = |res: io::Result<_>| match res { + Ok(_) => Ok(true), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + Ok(false) + } else { + Err(e) + } + } + }; + if !err_done && fds[errfd].revents != 0 && handle(err_pipe.read_to_end(&mut err))? { + err_done = true; + nfds -= 1; + } + data(false, &mut err, err_done); + if !out_done && fds[0].revents != 0 && handle(out_pipe.read_to_end(&mut out))? 
{ + out_done = true; + fds[0].fd = err_pipe.as_raw_fd(); + errfd = 0; + nfds -= 1; + } + data(true, &mut out, out_done); + } + Ok(()) + } +} + +#[cfg(windows)] +mod imp { + extern crate miow; + extern crate winapi; + + use std::io; + use std::os::windows::prelude::*; + use std::process::{ChildStderr, ChildStdout}; + use std::slice; + + use self::miow::iocp::{CompletionPort, CompletionStatus}; + use self::miow::pipe::NamedPipe; + use self::miow::Overlapped; + use self::winapi::shared::winerror::ERROR_BROKEN_PIPE; + + struct Pipe<'a> { + dst: &'a mut Vec, + overlapped: Overlapped, + pipe: NamedPipe, + done: bool, + } + + pub fn read2( + out_pipe: ChildStdout, + err_pipe: ChildStderr, + data: &mut FnMut(bool, &mut Vec, bool), + ) -> io::Result<()> { + let mut out = Vec::new(); + let mut err = Vec::new(); + + let port = CompletionPort::new(1)?; + port.add_handle(0, &out_pipe)?; + port.add_handle(1, &err_pipe)?; + + unsafe { + let mut out_pipe = Pipe::new(out_pipe, &mut out); + let mut err_pipe = Pipe::new(err_pipe, &mut err); + + out_pipe.read()?; + err_pipe.read()?; + + let mut status = [CompletionStatus::zero(), CompletionStatus::zero()]; + + while !out_pipe.done || !err_pipe.done { + for status in port.get_many(&mut status, None)? { + if status.token() == 0 { + out_pipe.complete(status); + data(true, out_pipe.dst, out_pipe.done); + out_pipe.read()?; + } else { + err_pipe.complete(status); + data(false, err_pipe.dst, err_pipe.done); + err_pipe.read()?; + } + } + } + + Ok(()) + } + } + + impl<'a> Pipe<'a> { + unsafe fn new(p: P, dst: &'a mut Vec) -> Pipe<'a> { + Pipe { + dst, + pipe: NamedPipe::from_raw_handle(p.into_raw_handle()), + overlapped: Overlapped::zero(), + done: false, + } + } + + unsafe fn read(&mut self) -> io::Result<()> { + let dst = slice_to_end(self.dst); + match self.pipe.read_overlapped(dst, self.overlapped.raw()) { + Ok(_) => Ok(()), + Err(e) => { + if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) { + self.done = true; + Ok(()) + } else { + Err(e) + } + } + } + } + + unsafe fn complete(&mut self, status: &CompletionStatus) { + let prev = self.dst.len(); + self.dst.set_len(prev + status.bytes_transferred() as usize); + if status.bytes_transferred() == 0 { + self.done = true; + } + } + } + + unsafe fn slice_to_end(v: &mut Vec) -> &mut [u8] { + if v.capacity() == 0 { + v.reserve(16); + } + if v.capacity() == v.len() { + v.reserve(1); + } + slice::from_raw_parts_mut( + v.as_mut_ptr().offset(v.len() as isize), + v.capacity() - v.len(), + ) + } +} diff --git a/src/cargo/util/rustc.rs b/src/cargo/util/rustc.rs new file mode 100644 index 000000000..c6de81889 --- /dev/null +++ b/src/cargo/util/rustc.rs @@ -0,0 +1,245 @@ +#![allow(deprecated)] // for SipHasher + +use std::path::{Path, PathBuf}; +use std::hash::{Hash, Hasher, SipHasher}; +use std::collections::hash_map::{Entry, HashMap}; +use std::sync::Mutex; +use std::env; + +use serde_json; + +use util::{self, internal, profile, CargoResult, ProcessBuilder}; +use util::paths; + +/// Information on the `rustc` executable +#[derive(Debug)] +pub struct Rustc { + /// The location of the exe + pub path: PathBuf, + /// An optional program that will be passed the path of the rust exe as its first argument, and + /// rustc args following this. + pub wrapper: Option, + /// Verbose version information (the output of `rustc -vV`) + pub verbose_version: String, + /// The host triple (arch-platform-OS), this comes from verbose_version. 
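+    /// (e.g. `x86_64-unknown-linux-gnu`).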
+    pub host: String,
+    cache: Mutex<Cache>,
+}
+
+impl Rustc {
+    /// Run the compiler at `path` to learn various pieces of information about
+    /// it, with an optional wrapper.
+    ///
+    /// If successful this function returns a description of the compiler along
+    /// with a list of its capabilities.
+    pub fn new(
+        path: PathBuf,
+        wrapper: Option<PathBuf>,
+        rustup_rustc: &Path,
+        cache_location: Option<PathBuf>,
+    ) -> CargoResult<Rustc> {
+        let _p = profile::start("Rustc::new");
+
+        let mut cache = Cache::load(&path, rustup_rustc, cache_location);
+
+        let mut cmd = util::process(&path);
+        cmd.arg("-vV");
+        let verbose_version = cache.cached_output(&cmd)?.0;
+
+        let host = {
+            let triple = verbose_version
+                .lines()
+                .find(|l| l.starts_with("host: "))
+                .map(|l| &l[6..])
+                .ok_or_else(|| internal("rustc -v didn't have a line for `host:`"))?;
+            triple.to_string()
+        };
+
+        Ok(Rustc {
+            path,
+            wrapper,
+            verbose_version,
+            host,
+            cache: Mutex::new(cache),
+        })
+    }
+
+    /// Get a process builder set up to use the found rustc version, with a wrapper if `Some`.
+    pub fn process(&self) -> ProcessBuilder {
+        if let Some(ref wrapper) = self.wrapper {
+            let mut cmd = util::process(wrapper);
+            {
+                cmd.arg(&self.path);
+            }
+            cmd
+        } else {
+            util::process(&self.path)
+        }
+    }
+
+    pub fn cached_output(&self, cmd: &ProcessBuilder) -> CargoResult<(String, String)> {
+        self.cache.lock().unwrap().cached_output(cmd)
+    }
+}
+
+/// It is well known that `rustc` is not the fastest compiler in the world.
+/// What is less known is that even `rustc --version --verbose` takes about a
+/// hundred milliseconds! Because we need compiler version info even for no-op
+/// builds, we cache it here, based on the compiler's mtime and rustup's
+/// current toolchain.
+///
+/// https://github.com/rust-lang/cargo/issues/5315
+/// https://github.com/rust-lang/rust/issues/49761
+#[derive(Debug)]
+struct Cache {
+    cache_location: Option<PathBuf>,
+    dirty: bool,
+    data: CacheData,
+}
+
+#[derive(Serialize, Deserialize, Debug, Default)]
+struct CacheData {
+    rustc_fingerprint: u64,
+    outputs: HashMap<u64, (String, String)>,
+}
+
+impl Cache {
+    fn load(rustc: &Path, rustup_rustc: &Path, cache_location: Option<PathBuf>) -> Cache {
+        match (cache_location, rustc_fingerprint(rustc, rustup_rustc)) {
+            (Some(cache_location), Ok(rustc_fingerprint)) => {
+                let empty = CacheData {
+                    rustc_fingerprint,
+                    outputs: HashMap::new(),
+                };
+                let mut dirty = true;
+                let data = match read(&cache_location) {
+                    Ok(data) => {
+                        if data.rustc_fingerprint == rustc_fingerprint {
+                            info!("reusing existing rustc info cache");
+                            dirty = false;
+                            data
+                        } else {
+                            info!("different compiler, creating new rustc info cache");
+                            empty
+                        }
+                    }
+                    Err(e) => {
+                        info!("failed to read rustc info cache: {}", e);
+                        empty
+                    }
+                };
+                return Cache {
+                    cache_location: Some(cache_location),
+                    dirty,
+                    data,
+                };
+
+                fn read(path: &Path) -> CargoResult<CacheData> {
+                    let json = paths::read(path)?;
+                    Ok(serde_json::from_str(&json)?)
+ } + } + (_, fingerprint) => { + if let Err(e) = fingerprint { + warn!("failed to calculate rustc fingerprint: {}", e); + } + info!("rustc info cache disabled"); + Cache { + cache_location: None, + dirty: false, + data: CacheData::default(), + } + } + } + } + + fn cached_output(&mut self, cmd: &ProcessBuilder) -> CargoResult<(String, String)> { + let key = process_fingerprint(cmd); + match self.data.outputs.entry(key) { + Entry::Occupied(entry) => { + info!("rustc info cache hit"); + Ok(entry.get().clone()) + } + Entry::Vacant(entry) => { + info!("rustc info cache miss"); + let output = cmd.exec_with_output()?; + let stdout = String::from_utf8(output.stdout) + .map_err(|_| internal("rustc didn't return utf8 output"))?; + let stderr = String::from_utf8(output.stderr) + .map_err(|_| internal("rustc didn't return utf8 output"))?; + let output = (stdout, stderr); + entry.insert(output.clone()); + self.dirty = true; + Ok(output) + } + } + } +} + +impl Drop for Cache { + fn drop(&mut self) { + if !self.dirty { + return; + } + if let Some(ref path) = self.cache_location { + let json = serde_json::to_string(&self.data).unwrap(); + match paths::write(path, json.as_bytes()) { + Ok(()) => info!("updated rustc info cache"), + Err(e) => warn!("failed to update rustc info cache: {}", e), + } + } + } +} + +fn rustc_fingerprint(path: &Path, rustup_rustc: &Path) -> CargoResult { + let mut hasher = SipHasher::new_with_keys(0, 0); + + let path = paths::resolve_executable(path)?; + path.hash(&mut hasher); + + paths::mtime(&path)?.hash(&mut hasher); + + // Rustup can change the effective compiler without touching + // the `rustc` binary, so we try to account for this here. + // If we see rustup's env vars, we mix them into the fingerprint, + // but we also mix in the mtime of the actual compiler (and not + // the rustup shim at `~/.cargo/bin/rustup`), because `RUSTUP_TOOLCHAIN` + // could be just `stable-x86_64-unknown-linux-gnu`, i.e, it could + // not mention the version of Rust at all, which changes after + // `rustup update`. + // + // If we don't see rustup env vars, but it looks like the compiler + // is managed by rustup, we conservatively bail out. 
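+    //
+    // In table form, the match below roughly does:
+    //
+    //     rustup env vars set           -> also hash the toolchain name,
+    //                                      rustup home, and the mtime of
+    //                                      the real `rustc` binary
+    //     unset, but `path` is the shim -> bail, can't fingerprint reliably
+    //     unset, plain `rustc`          -> the path + mtime above suffice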
+ let maybe_rustup = rustup_rustc == path; + match ( + maybe_rustup, + env::var("RUSTUP_HOME"), + env::var("RUSTUP_TOOLCHAIN"), + ) { + (_, Ok(rustup_home), Ok(rustup_toolchain)) => { + debug!("adding rustup info to rustc fingerprint"); + rustup_toolchain.hash(&mut hasher); + rustup_home.hash(&mut hasher); + let real_rustc = Path::new(&rustup_home) + .join("toolchains") + .join(rustup_toolchain) + .join("bin") + .join("rustc") + .with_extension(env::consts::EXE_EXTENSION); + paths::mtime(&real_rustc)?.hash(&mut hasher); + } + (true, _, _) => bail!("probably rustup rustc, but without rustup's env vars"), + _ => (), + } + + Ok(hasher.finish()) +} + +fn process_fingerprint(cmd: &ProcessBuilder) -> u64 { + let mut hasher = SipHasher::new_with_keys(0, 0); + cmd.get_args().hash(&mut hasher); + let mut env = cmd.get_envs().iter().collect::>(); + env.sort(); + env.hash(&mut hasher); + hasher.finish() +} diff --git a/src/cargo/util/sha256.rs b/src/cargo/util/sha256.rs new file mode 100644 index 000000000..604bb2941 --- /dev/null +++ b/src/cargo/util/sha256.rs @@ -0,0 +1,23 @@ +extern crate crypto_hash; +use self::crypto_hash::{Algorithm, Hasher}; +use std::io::Write; + +pub struct Sha256(Hasher); + +impl Sha256 { + pub fn new() -> Sha256 { + let hasher = Hasher::new(Algorithm::SHA256); + Sha256(hasher) + } + + pub fn update(&mut self, bytes: &[u8]) { + let _ = self.0.write_all(bytes); + } + + pub fn finish(&mut self) -> [u8; 32] { + let mut ret = [0u8; 32]; + let data = self.0.finish(); + ret.copy_from_slice(&data[..]); + ret + } +} diff --git a/src/cargo/util/to_semver.rs b/src/cargo/util/to_semver.rs new file mode 100644 index 000000000..4ffd6e3c0 --- /dev/null +++ b/src/cargo/util/to_semver.rs @@ -0,0 +1,33 @@ +use semver::Version; +use util::errors::CargoResult; + +pub trait ToSemver { + fn to_semver(self) -> CargoResult; +} + +impl ToSemver for Version { + fn to_semver(self) -> CargoResult { + Ok(self) + } +} + +impl<'a> ToSemver for &'a str { + fn to_semver(self) -> CargoResult { + match Version::parse(self) { + Ok(v) => Ok(v), + Err(..) 
=> Err(format_err!("cannot parse '{}' as a semver", self)), + } + } +} + +impl<'a> ToSemver for &'a String { + fn to_semver(self) -> CargoResult { + (**self).to_semver() + } +} + +impl<'a> ToSemver for &'a Version { + fn to_semver(self) -> CargoResult { + Ok(self.clone()) + } +} diff --git a/src/cargo/util/to_url.rs b/src/cargo/util/to_url.rs new file mode 100644 index 000000000..664c2568d --- /dev/null +++ b/src/cargo/util/to_url.rs @@ -0,0 +1,23 @@ +use std::path::Path; + +use url::Url; + +use util::CargoResult; + +/// A type that can be converted to a Url +pub trait ToUrl { + /// Performs the conversion + fn to_url(self) -> CargoResult; +} + +impl<'a> ToUrl for &'a str { + fn to_url(self) -> CargoResult { + Url::parse(self).map_err(|s| format_err!("invalid url `{}`: {}", self, s)) + } +} + +impl<'a> ToUrl for &'a Path { + fn to_url(self) -> CargoResult { + Url::from_file_path(self).map_err(|()| format_err!("invalid path url `{}`", self.display())) + } +} diff --git a/src/cargo/util/toml/mod.rs b/src/cargo/util/toml/mod.rs new file mode 100644 index 000000000..fd664572e --- /dev/null +++ b/src/cargo/util/toml/mod.rs @@ -0,0 +1,1416 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::fmt; +use std::fs; +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::str; + +use semver::{self, VersionReq}; +use serde::de::{self, Deserialize}; +use serde::ser; +use serde_ignored; +use toml; +use url::Url; + +use core::dependency::{Kind, Platform}; +use core::manifest::{LibKind, ManifestMetadata}; +use core::profiles::Profiles; +use core::{Dependency, Manifest, PackageId, Summary, Target}; +use core::{Edition, EitherManifest, Feature, Features, VirtualManifest}; +use core::{GitReference, PackageIdSpec, SourceId, WorkspaceConfig, WorkspaceRootConfig}; +use sources::CRATES_IO; +use util::errors::{CargoError, CargoResult, CargoResultExt}; +use util::paths; +use util::{self, Config, ToUrl}; + +mod targets; +use self::targets::targets; + +pub fn read_manifest( + path: &Path, + source_id: &SourceId, + config: &Config, +) -> CargoResult<(EitherManifest, Vec)> { + trace!( + "read_manifest; path={}; source-id={}", + path.display(), + source_id + ); + let contents = paths::read(path)?; + + let ret = do_read_manifest(&contents, path, source_id, config) + .chain_err(|| format!("failed to parse manifest at `{}`", path.display()))?; + Ok(ret) +} + +fn do_read_manifest( + contents: &str, + manifest_file: &Path, + source_id: &SourceId, + config: &Config, +) -> CargoResult<(EitherManifest, Vec)> { + let package_root = manifest_file.parent().unwrap(); + + let toml = { + let pretty_filename = + util::without_prefix(manifest_file, config.cwd()).unwrap_or(manifest_file); + parse(contents, pretty_filename, config)? 
+ }; + + let mut unused = BTreeSet::new(); + let manifest: TomlManifest = serde_ignored::deserialize(toml, |path| { + let mut key = String::new(); + stringify(&mut key, &path); + unused.insert(key); + })?; + + let manifest = Rc::new(manifest); + return if manifest.project.is_some() || manifest.package.is_some() { + let (mut manifest, paths) = + TomlManifest::to_real_manifest(&manifest, source_id, package_root, config)?; + for key in unused { + manifest.add_warning(format!("unused manifest key: {}", key)); + } + if !manifest.targets().iter().any(|t| !t.is_custom_build()) { + bail!( + "no targets specified in the manifest\n \ + either src/lib.rs, src/main.rs, a [lib] section, or \ + [[bin]] section must be present" + ) + } + Ok((EitherManifest::Real(manifest), paths)) + } else { + let (m, paths) = + TomlManifest::to_virtual_manifest(&manifest, source_id, package_root, config)?; + Ok((EitherManifest::Virtual(m), paths)) + }; + + fn stringify(dst: &mut String, path: &serde_ignored::Path) { + use serde_ignored::Path; + + match *path { + Path::Root => {} + Path::Seq { parent, index } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(&index.to_string()); + } + Path::Map { parent, ref key } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(key); + } + Path::Some { parent } + | Path::NewtypeVariant { parent } + | Path::NewtypeStruct { parent } => stringify(dst, parent), + } + } +} + +pub fn parse(toml: &str, file: &Path, config: &Config) -> CargoResult { + let first_error = match toml.parse() { + Ok(ret) => return Ok(ret), + Err(e) => e, + }; + + let mut second_parser = toml::de::Deserializer::new(toml); + second_parser.set_require_newline_after_table(false); + if let Ok(ret) = toml::Value::deserialize(&mut second_parser) { + let msg = format!( + "\ +TOML file found which contains invalid syntax and will soon not parse +at `{}`. + +The TOML spec requires newlines after table definitions (e.g. `[a] b = 1` is +invalid), but this file has a table header which does not have a newline after +it. 
A newline needs to be added and this warning will soon become a hard error +in the future.", + file.display() + ); + config.shell().warn(&msg)?; + return Ok(ret); + } + + let first_error = CargoError::from(first_error); + Err(first_error.context("could not parse input as TOML").into()) +} + +type TomlLibTarget = TomlTarget; +type TomlBinTarget = TomlTarget; +type TomlExampleTarget = TomlTarget; +type TomlTestTarget = TomlTarget; +type TomlBenchTarget = TomlTarget; + +#[derive(Debug, Serialize)] +#[serde(untagged)] +pub enum TomlDependency { + Simple(String), + Detailed(DetailedTomlDependency), +} + +impl<'de> de::Deserialize<'de> for TomlDependency { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct TomlDependencyVisitor; + + impl<'de> de::Visitor<'de> for TomlDependencyVisitor { + type Value = TomlDependency; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str( + "a version string like \"0.9.8\" or a \ + detailed dependency like { version = \"0.9.8\" }", + ) + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(TomlDependency::Simple(s.to_owned())) + } + + fn visit_map(self, map: V) -> Result + where + V: de::MapAccess<'de>, + { + let mvd = de::value::MapAccessDeserializer::new(map); + DetailedTomlDependency::deserialize(mvd).map(TomlDependency::Detailed) + } + } + + deserializer.deserialize_any(TomlDependencyVisitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +#[serde(rename_all = "kebab-case")] +pub struct DetailedTomlDependency { + version: Option, + registry: Option, + registry_index: Option, + path: Option, + git: Option, + branch: Option, + tag: Option, + rev: Option, + features: Option>, + optional: Option, + default_features: Option, + #[serde(rename = "default_features")] + default_features2: Option, + package: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct TomlManifest { + cargo_features: Option>, + package: Option>, + project: Option>, + profile: Option, + lib: Option, + bin: Option>, + example: Option>, + test: Option>, + bench: Option>, + dependencies: Option>, + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + features: Option>>, + target: Option>, + replace: Option>, + patch: Option>>, + workspace: Option, + badges: Option>>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +pub struct TomlProfiles { + test: Option, + doc: Option, + bench: Option, + dev: Option, + release: Option, +} + +impl TomlProfiles { + fn validate(&self, features: &Features, warnings: &mut Vec) -> CargoResult<()> { + if let Some(ref test) = self.test { + test.validate("test", features, warnings)?; + } + if let Some(ref doc) = self.doc { + doc.validate("doc", features, warnings)?; + } + if let Some(ref bench) = self.bench { + bench.validate("bench", features, warnings)?; + } + if let Some(ref dev) = self.dev { + dev.validate("dev", features, warnings)?; + } + if let Some(ref release) = self.release { + release.validate("release", features, warnings)?; + } + Ok(()) + } +} + +#[derive(Clone, Debug)] +pub struct TomlOptLevel(pub String); + +impl<'de> de::Deserialize<'de> for TomlOptLevel { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = TomlOptLevel; 
+ + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an optimization level") + } + + fn visit_i64(self, value: i64) -> Result + where + E: de::Error, + { + Ok(TomlOptLevel(value.to_string())) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if value == "s" || value == "z" { + Ok(TomlOptLevel(value.to_string())) + } else { + Err(E::custom(format!( + "must be an integer, `z`, or `s`, \ + but found: {}", + value + ))) + } + } + } + + d.deserialize_u32(Visitor) + } +} + +impl ser::Serialize for TomlOptLevel { + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + match self.0.parse::() { + Ok(n) => n.serialize(serializer), + Err(_) => self.0.serialize(serializer), + } + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum U32OrBool { + U32(u32), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for U32OrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = U32OrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or an integer") + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(U32OrBool::Bool(b)) + } + + fn visit_i64(self, u: i64) -> Result + where + E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_u64(self, u: u64) -> Result + where + E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +#[serde(rename_all = "kebab-case")] +pub struct TomlProfile { + pub opt_level: Option, + pub lto: Option, + pub codegen_units: Option, + pub debug: Option, + pub debug_assertions: Option, + pub rpath: Option, + pub panic: Option, + pub overflow_checks: Option, + pub incremental: Option, + pub overrides: Option>, + pub build_override: Option>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub enum ProfilePackageSpec { + Spec(PackageIdSpec), + All, +} + +impl ser::Serialize for ProfilePackageSpec { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + match *self { + ProfilePackageSpec::Spec(ref spec) => spec.serialize(s), + ProfilePackageSpec::All => "*".serialize(s), + } + } +} + +impl<'de> de::Deserialize<'de> for ProfilePackageSpec { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + if string == "*" { + Ok(ProfilePackageSpec::All) + } else { + PackageIdSpec::parse(&string) + .map_err(de::Error::custom) + .map(|s| ProfilePackageSpec::Spec(s)) + } + } +} + +impl TomlProfile { + fn validate( + &self, + name: &str, + features: &Features, + warnings: &mut Vec, + ) -> CargoResult<()> { + if let Some(ref profile) = self.build_override { + features.require(Feature::profile_overrides())?; + profile.validate_override()?; + } + if let Some(ref override_map) = self.overrides { + features.require(Feature::profile_overrides())?; + for profile in override_map.values() { + profile.validate_override()?; + } + } + + match name { + "dev" | "release" => {} + _ => { + if self.overrides.is_some() || self.build_override.is_some() { + bail!( + "Profile overrides may only be specified for \ + `dev` or `release` profile, not `{}`.", + name + ); + } + } + } + + match name { + "doc" => { + warnings.push("profile `doc` is deprecated and has no effect".to_string()); + } + "test" | 
"bench" => { + if self.panic.is_some() { + warnings.push(format!("`panic` setting is ignored for `{}` profile", name)) + } + } + _ => {} + } + Ok(()) + } + + fn validate_override(&self) -> CargoResult<()> { + if self.overrides.is_some() || self.build_override.is_some() { + bail!("Profile overrides cannot be nested."); + } + if self.panic.is_some() { + bail!("`panic` may not be specified in a profile override.") + } + if self.lto.is_some() { + bail!("`lto` may not be specified in a profile override.") + } + if self.rpath.is_some() { + bail!("`rpath` may not be specified in a profile override.") + } + Ok(()) + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum StringOrBool { + String(String), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for StringOrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = StringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or a string") + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(StringOrBool::Bool(b)) + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(StringOrBool::String(s.to_string())) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum VecStringOrBool { + VecString(Vec), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for VecStringOrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = VecStringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or vector of strings") + } + + fn visit_seq(self, v: V) -> Result + where + V: de::SeqAccess<'de>, + { + let seq = de::value::SeqAccessDeserializer::new(v); + Vec::deserialize(seq).map(VecStringOrBool::VecString) + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(VecStringOrBool::Bool(b)) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct TomlProject { + name: String, + version: semver::Version, + authors: Option>, + build: Option, + links: Option, + exclude: Option>, + include: Option>, + publish: Option, + #[serde(rename = "publish-lockfile")] + publish_lockfile: Option, + workspace: Option, + #[serde(rename = "im-a-teapot")] + im_a_teapot: Option, + autobins: Option, + autoexamples: Option, + autotests: Option, + autobenches: Option, + #[serde(rename = "namespaced-features")] + namespaced_features: Option, + + // package metadata + description: Option, + homepage: Option, + documentation: Option, + readme: Option, + keywords: Option>, + categories: Option>, + license: Option, + #[serde(rename = "license-file")] + license_file: Option, + repository: Option, + metadata: Option, + edition: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct TomlWorkspace { + members: Option>, + #[serde(rename = "default-members")] + default_members: Option>, + exclude: Option>, +} + +impl TomlProject { + pub fn to_package_id(&self, source_id: &SourceId) -> CargoResult { + PackageId::new(&self.name, self.version.clone(), source_id) + } +} + +struct Context<'a, 'b> { + pkgid: Option<&'a PackageId>, + deps: &'a mut Vec, + source_id: &'a SourceId, + nested_paths: &'a mut Vec, + config: &'b Config, + warnings: 
&'a mut Vec, + platform: Option, + root: &'a Path, + features: &'a Features, +} + +impl TomlManifest { + pub fn prepare_for_publish(&self, config: &Config) -> CargoResult { + let mut package = self.package + .as_ref() + .or_else(|| self.project.as_ref()) + .unwrap() + .clone(); + package.workspace = None; + return Ok(TomlManifest { + package: Some(package), + project: None, + profile: self.profile.clone(), + lib: self.lib.clone(), + bin: self.bin.clone(), + example: self.example.clone(), + test: self.test.clone(), + bench: self.bench.clone(), + dependencies: map_deps(config, self.dependencies.as_ref())?, + dev_dependencies: map_deps( + config, + self.dev_dependencies + .as_ref() + .or_else(|| self.dev_dependencies2.as_ref()), + )?, + dev_dependencies2: None, + build_dependencies: map_deps( + config, + self.build_dependencies + .as_ref() + .or_else(|| self.build_dependencies2.as_ref()), + )?, + build_dependencies2: None, + features: self.features.clone(), + target: match self.target.as_ref().map(|target_map| { + target_map + .iter() + .map(|(k, v)| { + Ok(( + k.clone(), + TomlPlatform { + dependencies: map_deps(config, v.dependencies.as_ref())?, + dev_dependencies: map_deps( + config, + v.dev_dependencies + .as_ref() + .or_else(|| v.dev_dependencies2.as_ref()), + )?, + dev_dependencies2: None, + build_dependencies: map_deps( + config, + v.build_dependencies + .as_ref() + .or_else(|| v.build_dependencies2.as_ref()), + )?, + build_dependencies2: None, + }, + )) + }) + .collect() + }) { + Some(Ok(v)) => Some(v), + Some(Err(e)) => return Err(e), + None => None, + }, + replace: None, + patch: None, + workspace: None, + badges: self.badges.clone(), + cargo_features: self.cargo_features.clone(), + }); + + fn map_deps( + config: &Config, + deps: Option<&BTreeMap>, + ) -> CargoResult>> { + let deps = match deps { + Some(deps) => deps, + None => return Ok(None), + }; + let deps = deps.iter() + .map(|(k, v)| Ok((k.clone(), map_dependency(config, v)?))) + .collect::>>()?; + Ok(Some(deps)) + } + + fn map_dependency(config: &Config, dep: &TomlDependency) -> CargoResult { + match *dep { + TomlDependency::Detailed(ref d) => { + let mut d = d.clone(); + d.path.take(); // path dependencies become crates.io deps + // registry specifications are elaborated to the index URL + if let Some(registry) = d.registry.take() { + let src = SourceId::alt_registry(config, ®istry)?; + d.registry_index = Some(src.url().to_string()); + } + Ok(TomlDependency::Detailed(d)) + } + TomlDependency::Simple(ref s) => { + Ok(TomlDependency::Detailed(DetailedTomlDependency { + version: Some(s.clone()), + ..Default::default() + })) + } + } + } + } + + fn to_real_manifest( + me: &Rc, + source_id: &SourceId, + package_root: &Path, + config: &Config, + ) -> CargoResult<(Manifest, Vec)> { + let mut nested_paths = vec![]; + let mut warnings = vec![]; + let mut errors = vec![]; + + // Parse features first so they will be available when parsing other parts of the toml + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(&cargo_features, &mut warnings)?; + + let project = me.project.as_ref().or_else(|| me.package.as_ref()); + let project = project.ok_or_else(|| format_err!("no `package` section found"))?; + + let package_name = project.name.trim(); + if package_name.is_empty() { + bail!("package name cannot be an empty string") + } + + let pkgid = project.to_package_id(source_id)?; + + let edition = if let Some(ref edition) = project.edition { + features + 
.require(Feature::edition()) + .chain_err(|| "editions are unstable")?; + edition.parse() + .chain_err(|| "failed to parse the `edition` key")? + } else { + Edition::Edition2015 + }; + + // If we have no lib at all, use the inferred lib if available + // If we have a lib with a path, we're done + // If we have a lib with no path, use the inferred lib or_else package name + let targets = targets( + me, + package_name, + package_root, + edition, + &project.build, + &mut warnings, + &mut errors, + )?; + + if targets.is_empty() { + debug!("manifest has no build targets"); + } + + if let Err(e) = unique_build_targets(&targets, package_root) { + warnings.push(format!( + "file found to be present in multiple \ + build targets: {}", + e + )); + } + + let mut deps = Vec::new(); + let replace; + let patch; + + { + let mut cx = Context { + pkgid: Some(&pkgid), + deps: &mut deps, + source_id, + nested_paths: &mut nested_paths, + config, + warnings: &mut warnings, + features: &features, + platform: None, + root: package_root, + }; + + fn process_dependencies( + cx: &mut Context, + new_deps: Option<&BTreeMap>, + kind: Option, + ) -> CargoResult<()> { + let dependencies = match new_deps { + Some(dependencies) => dependencies, + None => return Ok(()), + }; + for (n, v) in dependencies.iter() { + let dep = v.to_dependency(n, cx, kind)?; + cx.deps.push(dep); + } + + Ok(()) + } + + // Collect the deps + process_dependencies(&mut cx, me.dependencies.as_ref(), None)?; + let dev_deps = me.dev_dependencies + .as_ref() + .or_else(|| me.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + let build_deps = me.build_dependencies + .as_ref() + .or_else(|| me.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + + for (name, platform) in me.target.iter().flat_map(|t| t) { + cx.platform = Some(name.parse()?); + process_dependencies(&mut cx, platform.dependencies.as_ref(), None)?; + let build_deps = platform + .build_dependencies + .as_ref() + .or_else(|| platform.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + let dev_deps = platform + .dev_dependencies + .as_ref() + .or_else(|| platform.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + } + + replace = me.replace(&mut cx)?; + patch = me.patch(&mut cx)?; + } + + { + let mut names_sources = BTreeMap::new(); + for dep in &deps { + let name = dep.rename().unwrap_or(dep.name().as_str()); + let prev = names_sources.insert(name.to_string(), dep.source_id()); + if prev.is_some() && prev != Some(dep.source_id()) { + bail!( + "Dependency '{}' has different source paths depending on the build \ + target. 
Each dependency must have a single canonical source path \ + irrespective of build target.", + name + ); + } + } + } + + let exclude = project.exclude.clone().unwrap_or_default(); + let include = project.include.clone().unwrap_or_default(); + if project.namespaced_features.is_some() { + features.require(Feature::namespaced_features())?; + } + + let summary = Summary::new( + pkgid, + deps, + me.features.clone().unwrap_or_else(BTreeMap::new), + project.links.clone(), + project.namespaced_features.unwrap_or(false), + )?; + let metadata = ManifestMetadata { + description: project.description.clone(), + homepage: project.homepage.clone(), + documentation: project.documentation.clone(), + readme: project.readme.clone(), + authors: project.authors.clone().unwrap_or_default(), + license: project.license.clone(), + license_file: project.license_file.clone(), + repository: project.repository.clone(), + keywords: project.keywords.clone().unwrap_or_default(), + categories: project.categories.clone().unwrap_or_default(), + badges: me.badges.clone().unwrap_or_default(), + links: project.links.clone(), + }; + + let workspace_config = match (me.workspace.as_ref(), project.workspace.as_ref()) { + (Some(config), None) => WorkspaceConfig::Root(WorkspaceRootConfig::new( + &package_root, + &config.members, + &config.default_members, + &config.exclude, + )), + (None, root) => WorkspaceConfig::Member { + root: root.cloned(), + }, + (Some(..), Some(..)) => bail!( + "cannot configure both `package.workspace` and \ + `[workspace]`, only one can be specified" + ), + }; + if let Some(ref profiles) = me.profile { + profiles.validate(&features, &mut warnings)?; + } + let profiles = build_profiles(&me.profile); + let publish = match project.publish { + Some(VecStringOrBool::VecString(ref vecstring)) => { + features + .require(Feature::alternative_registries()) + .chain_err(|| { + "the `publish` manifest key is unstable for anything other than a value of true or false" + })?; + Some(vecstring.clone()) + } + Some(VecStringOrBool::Bool(false)) => Some(vec![]), + None | Some(VecStringOrBool::Bool(true)) => None, + }; + + let publish_lockfile = match project.publish_lockfile { + Some(b) => { + features.require(Feature::publish_lockfile())?; + b + } + None => false, + }; + + let custom_metadata = project.metadata.clone(); + let mut manifest = Manifest::new( + summary, + targets, + exclude, + include, + project.links.clone(), + metadata, + custom_metadata, + profiles, + publish, + publish_lockfile, + replace, + patch, + workspace_config, + features, + edition, + project.im_a_teapot, + Rc::clone(me), + ); + if project.license_file.is_some() && project.license.is_some() { + manifest.add_warning( + "only one of `license` or \ + `license-file` is necessary" + .to_string(), + ); + } + for warning in warnings { + manifest.add_warning(warning); + } + for error in errors { + manifest.add_critical_warning(error); + } + + manifest.feature_gate()?; + + Ok((manifest, nested_paths)) + } + + fn to_virtual_manifest( + me: &Rc, + source_id: &SourceId, + root: &Path, + config: &Config, + ) -> CargoResult<(VirtualManifest, Vec)> { + if me.project.is_some() { + bail!("virtual manifests do not define [project]"); + } + if me.package.is_some() { + bail!("virtual manifests do not define [package]"); + } + if me.lib.is_some() { + bail!("virtual manifests do not specify [lib]"); + } + if me.bin.is_some() { + bail!("virtual manifests do not specify [[bin]]"); + } + if me.example.is_some() { + bail!("virtual manifests do not specify [[example]]"); + } 
+ if me.test.is_some() { + bail!("virtual manifests do not specify [[test]]"); + } + if me.bench.is_some() { + bail!("virtual manifests do not specify [[bench]]"); + } + + let mut nested_paths = Vec::new(); + let mut warnings = Vec::new(); + let mut deps = Vec::new(); + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(&cargo_features, &mut warnings)?; + + let (replace, patch) = { + let mut cx = Context { + pkgid: None, + deps: &mut deps, + source_id, + nested_paths: &mut nested_paths, + config, + warnings: &mut warnings, + platform: None, + features: &features, + root, + }; + (me.replace(&mut cx)?, me.patch(&mut cx)?) + }; + let profiles = build_profiles(&me.profile); + let workspace_config = match me.workspace { + Some(ref config) => WorkspaceConfig::Root(WorkspaceRootConfig::new( + &root, + &config.members, + &config.default_members, + &config.exclude, + )), + None => { + bail!("virtual manifests must be configured with [workspace]"); + } + }; + Ok(( + VirtualManifest::new(replace, patch, workspace_config, profiles), + nested_paths, + )) + } + + fn replace(&self, cx: &mut Context) -> CargoResult> { + if self.patch.is_some() && self.replace.is_some() { + bail!("cannot specify both [replace] and [patch]"); + } + let mut replace = Vec::new(); + for (spec, replacement) in self.replace.iter().flat_map(|x| x) { + let mut spec = PackageIdSpec::parse(spec).chain_err(|| { + format!( + "replacements must specify a valid semver \ + version to replace, but `{}` does not", + spec + ) + })?; + if spec.url().is_none() { + spec.set_url(CRATES_IO.parse().unwrap()); + } + + let version_specified = match *replacement { + TomlDependency::Detailed(ref d) => d.version.is_some(), + TomlDependency::Simple(..) => true, + }; + if version_specified { + bail!( + "replacements cannot specify a version \ + requirement, but found one for `{}`", + spec + ); + } + + let mut dep = replacement.to_dependency(spec.name(), cx, None)?; + { + let version = spec.version().ok_or_else(|| { + format_err!( + "replacements must specify a version \ + to replace, but `{}` does not", + spec + ) + })?; + dep.set_version_req(VersionReq::exact(version)); + } + replace.push((spec, dep)); + } + Ok(replace) + } + + fn patch(&self, cx: &mut Context) -> CargoResult>> { + let mut patch = HashMap::new(); + for (url, deps) in self.patch.iter().flat_map(|x| x) { + let url = match &url[..] { + "crates-io" => CRATES_IO.parse().unwrap(), + _ => url.to_url()?, + }; + patch.insert( + url, + deps.iter() + .map(|(name, dep)| dep.to_dependency(name, cx, None)) + .collect::>>()?, + ); + } + Ok(patch) + } + + fn maybe_custom_build( + &self, + build: &Option, + package_root: &Path, + ) -> Option { + let build_rs = package_root.join("build.rs"); + match *build { + Some(StringOrBool::Bool(false)) => None, // explicitly no build script + Some(StringOrBool::Bool(true)) => Some(build_rs.into()), + Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), + None => { + match fs::metadata(&build_rs) { + // If there is a build.rs file next to the Cargo.toml, assume it is + // a build script + Ok(ref e) if e.is_file() => Some(build_rs.into()), + Ok(_) | Err(_) => None, + } + } + } + } + + pub fn has_profiles(&self) -> bool { + self.profile.is_some() + } +} + +/// Will check a list of build targets, and make sure the target names are unique within a vector. +/// If not, the name of the offending build target is returned. 
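+/// (Concretely, uniqueness is checked on each target's `src_path` joined against the
+/// package root, and it is the duplicated path that is returned.)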
+fn unique_build_targets(targets: &[Target], package_root: &Path) -> Result<(), String> { + let mut seen = HashSet::new(); + for v in targets.iter().map(|e| package_root.join(e.src_path())) { + if !seen.insert(v.clone()) { + return Err(v.display().to_string()); + } + } + Ok(()) +} + +impl TomlDependency { + fn to_dependency( + &self, + name: &str, + cx: &mut Context, + kind: Option, + ) -> CargoResult { + match *self { + TomlDependency::Simple(ref version) => DetailedTomlDependency { + version: Some(version.clone()), + ..Default::default() + }.to_dependency(name, cx, kind), + TomlDependency::Detailed(ref details) => details.to_dependency(name, cx, kind), + } + } +} + +impl DetailedTomlDependency { + fn to_dependency( + &self, + name: &str, + cx: &mut Context, + kind: Option, + ) -> CargoResult { + if self.version.is_none() && self.path.is_none() && self.git.is_none() { + let msg = format!( + "dependency ({}) specified without \ + providing a local path, Git repository, or \ + version to use. This will be considered an \ + error in future versions", + name + ); + cx.warnings.push(msg); + } + + if self.git.is_none() { + let git_only_keys = [ + (&self.branch, "branch"), + (&self.tag, "tag"), + (&self.rev, "rev"), + ]; + + for &(key, key_name) in &git_only_keys { + if key.is_some() { + let msg = format!( + "key `{}` is ignored for dependency ({}). \ + This will be considered an error in future versions", + key_name, name + ); + cx.warnings.push(msg) + } + } + } + + let registry_id = match self.registry { + Some(ref registry) => { + cx.features.require(Feature::alternative_registries())?; + SourceId::alt_registry(cx.config, registry)? + } + None => SourceId::crates_io(cx.config)?, + }; + + let new_source_id = match ( + self.git.as_ref(), + self.path.as_ref(), + self.registry.as_ref(), + self.registry_index.as_ref(), + ) { + (Some(_), _, Some(_), _) | (Some(_), _, _, Some(_)) => bail!( + "dependency ({}) specification is ambiguous. \ + Only one of `git` or `registry` is allowed.", + name + ), + (_, _, Some(_), Some(_)) => bail!( + "dependency ({}) specification is ambiguous. \ + Only one of `registry` or `registry-index` is allowed.", + name + ), + (Some(git), maybe_path, _, _) => { + if maybe_path.is_some() { + let msg = format!( + "dependency ({}) specification is ambiguous. \ + Only one of `git` or `path` is allowed. \ + This will be considered an error in future versions", + name + ); + cx.warnings.push(msg) + } + + let n_details = [&self.branch, &self.tag, &self.rev] + .iter() + .filter(|d| d.is_some()) + .count(); + + if n_details > 1 { + let msg = format!( + "dependency ({}) specification is ambiguous. \ + Only one of `branch`, `tag` or `rev` is allowed. \ + This will be considered an error in future versions", + name + ); + cx.warnings.push(msg) + } + + let reference = self.branch + .clone() + .map(GitReference::Branch) + .or_else(|| self.tag.clone().map(GitReference::Tag)) + .or_else(|| self.rev.clone().map(GitReference::Rev)) + .unwrap_or_else(|| GitReference::Branch("master".to_string())); + let loc = git.to_url()?; + SourceId::for_git(&loc, reference)? + } + (None, Some(path), _, _) => { + cx.nested_paths.push(PathBuf::from(path)); + // If the source id for the package we're parsing is a path + // source, then we normalize the path here to get rid of + // components like `..`. 
+                //
+                // The purpose of this is to get a canonical id for the package
+                // that we're depending on to ensure that builds of this package
+                // always end up hashing to the same value no matter where it's
+                // built from.
+                if cx.source_id.is_path() {
+                    let path = cx.root.join(path);
+                    let path = util::normalize_path(&path);
+                    SourceId::for_path(&path)?
+                } else {
+                    cx.source_id.clone()
+                }
+            }
+            (None, None, Some(registry), None) => SourceId::alt_registry(cx.config, registry)?,
+            (None, None, None, Some(registry_index)) => {
+                let url = registry_index.to_url()?;
+                SourceId::for_registry(&url)?
+            }
+            (None, None, None, None) => SourceId::crates_io(cx.config)?,
+        };
+
+        let (pkg_name, rename) = match self.package {
+            Some(ref s) => (&s[..], Some(name)),
+            None => (name, None),
+        };
+
+        let version = self.version.as_ref().map(|v| &v[..]);
+        let mut dep = match cx.pkgid {
+            Some(id) => Dependency::parse(pkg_name, version, &new_source_id, id, cx.config)?,
+            None => Dependency::parse_no_deprecated(name, version, &new_source_id)?,
+        };
+        dep.set_features(self.features.clone().unwrap_or_default())
+            .set_default_features(
+                self.default_features
+                    .or(self.default_features2)
+                    .unwrap_or(true),
+            )
+            .set_optional(self.optional.unwrap_or(false))
+            .set_platform(cx.platform.clone())
+            .set_registry_id(&registry_id);
+        if let Some(kind) = kind {
+            dep.set_kind(kind);
+        }
+        if let Some(rename) = rename {
+            cx.features.require(Feature::rename_dependency())?;
+            dep.set_rename(rename);
+        }
+        Ok(dep)
+    }
+}
+
+#[derive(Default, Serialize, Deserialize, Debug, Clone)]
+struct TomlTarget {
+    name: Option<String>,
+
+    // The intention was to only accept `crate-type` here but historical
+    // versions of Cargo also accepted `crate_type`, so look for both.
+    #[serde(rename = "crate-type")]
+    crate_type: Option<Vec<String>>,
+    #[serde(rename = "crate_type")]
+    crate_type2: Option<Vec<String>>,
+
+    path: Option<PathValue>,
+    test: Option<bool>,
+    doctest: Option<bool>,
+    bench: Option<bool>,
+    doc: Option<bool>,
+    plugin: Option<bool>,
+    #[serde(rename = "proc-macro")]
+    proc_macro: Option<bool>,
+    #[serde(rename = "proc_macro")]
+    proc_macro2: Option<bool>,
+    harness: Option<bool>,
+    #[serde(rename = "required-features")]
+    required_features: Option<Vec<String>>,
+}
+
+#[derive(Clone)]
+struct PathValue(PathBuf);
+
+impl<'de> de::Deserialize<'de> for PathValue {
+    fn deserialize<D>(deserializer: D) -> Result<PathValue, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        Ok(PathValue(String::deserialize(deserializer)?.into()))
+    }
+}
+
+impl ser::Serialize for PathValue {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        self.0.serialize(serializer)
+    }
+}
+
+/// Corresponds to a `target` entry, but `TomlTarget` is already used.
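+/// (`TomlTarget` names the tables for individual build targets, so this
+/// platform-specific table is called `TomlPlatform` instead.)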
+#[derive(Serialize, Deserialize, Debug)]
+struct TomlPlatform {
+    dependencies: Option<BTreeMap<String, TomlDependency>>,
+    #[serde(rename = "build-dependencies")]
+    build_dependencies: Option<BTreeMap<String, TomlDependency>>,
+    #[serde(rename = "build_dependencies")]
+    build_dependencies2: Option<BTreeMap<String, TomlDependency>>,
+    #[serde(rename = "dev-dependencies")]
+    dev_dependencies: Option<BTreeMap<String, TomlDependency>>,
+    #[serde(rename = "dev_dependencies")]
+    dev_dependencies2: Option<BTreeMap<String, TomlDependency>>,
+}
+
+impl TomlTarget {
+    fn new() -> TomlTarget {
+        TomlTarget::default()
+    }
+
+    fn name(&self) -> String {
+        match self.name {
+            Some(ref name) => name.clone(),
+            None => panic!("target name is required"),
+        }
+    }
+
+    fn proc_macro(&self) -> Option<bool> {
+        self.proc_macro.or(self.proc_macro2)
+    }
+
+    fn crate_types(&self) -> Option<&Vec<String>> {
+        self.crate_type
+            .as_ref()
+            .or_else(|| self.crate_type2.as_ref())
+    }
+}
+
+impl fmt::Debug for PathValue {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+fn build_profiles(profiles: &Option<TomlProfiles>) -> Profiles {
+    let profiles = profiles.as_ref();
+    Profiles::new(
+        profiles.and_then(|p| p.dev.clone()),
+        profiles.and_then(|p| p.release.clone()),
+        profiles.and_then(|p| p.test.clone()),
+        profiles.and_then(|p| p.bench.clone()),
+        profiles.and_then(|p| p.doc.clone()),
+    )
+}
diff --git a/src/cargo/util/toml/targets.rs b/src/cargo/util/toml/targets.rs
new file mode 100644
index 000000000..1bbdf20e7
--- /dev/null
+++ b/src/cargo/util/toml/targets.rs
@@ -0,0 +1,737 @@
+//! This module implements Cargo conventions for directory layout:
+//!
+//!  * `src/lib.rs` is a library
+//!  * `src/main.rs` is a binary
+//!  * `src/bin/*.rs` are binaries
+//!  * `examples/*.rs` are examples
+//!  * `tests/*.rs` are integration tests
+//!  * `benches/*.rs` are benchmarks
+//!
+//! It is a bit tricky because we need to match explicit information from `Cargo.toml`
+//! with implicit info in directory layout.
+
+use std::path::{Path, PathBuf};
+use std::fs::{self, DirEntry};
+use std::collections::HashSet;
+
+use core::{compiler, Edition, Target};
+use util::errors::CargoResult;
+use super::{LibKind, PathValue, StringOrBool, TomlBenchTarget, TomlBinTarget, TomlExampleTarget,
+            TomlLibTarget, TomlManifest, TomlTarget, TomlTestTarget};
+
+pub fn targets(
+    manifest: &TomlManifest,
+    package_name: &str,
+    package_root: &Path,
+    edition: Edition,
+    custom_build: &Option<StringOrBool>,
+    warnings: &mut Vec<String>,
+    errors: &mut Vec<String>,
+) -> CargoResult<Vec<Target>> {
+    let mut targets = Vec::new();
+
+    let has_lib;
+
+    if let Some(target) = clean_lib(
+        manifest.lib.as_ref(),
+        package_root,
+        package_name,
+        edition,
+        warnings,
+    )?
{ + targets.push(target); + has_lib = true; + } else { + has_lib = false; + } + + let package = manifest + .package + .as_ref() + .or_else(|| manifest.project.as_ref()) + .ok_or_else(|| format_err!("manifest has no `package` (or `project`)"))?; + + targets.extend(clean_bins( + manifest.bin.as_ref(), + package_root, + package_name, + edition, + package.autobins, + warnings, + errors, + has_lib, + )?); + + targets.extend(clean_examples( + manifest.example.as_ref(), + package_root, + edition, + package.autoexamples, + warnings, + errors, + )?); + + targets.extend(clean_tests( + manifest.test.as_ref(), + package_root, + edition, + package.autotests, + warnings, + errors, + )?); + + targets.extend(clean_benches( + manifest.bench.as_ref(), + package_root, + edition, + package.autobenches, + warnings, + errors, + )?); + + // processing the custom build script + if let Some(custom_build) = manifest.maybe_custom_build(custom_build, package_root) { + let name = format!( + "build-script-{}", + custom_build + .file_stem() + .and_then(|s| s.to_str()) + .unwrap_or("") + ); + targets.push(Target::custom_build_target( + &name, + package_root.join(custom_build), + )); + } + + Ok(targets) +} + +fn clean_lib( + toml_lib: Option<&TomlLibTarget>, + package_root: &Path, + package_name: &str, + edition: Edition, + warnings: &mut Vec, +) -> CargoResult> { + let inferred = inferred_lib(package_root); + let lib = match toml_lib { + Some(lib) => { + if let Some(ref name) = lib.name { + // XXX: other code paths dodge this validation + if name.contains('-') { + bail!("library target names cannot contain hyphens: {}", name) + } + } + Some(TomlTarget { + name: lib.name.clone().or_else(|| Some(package_name.to_owned())), + ..lib.clone() + }) + } + None => inferred.as_ref().map(|lib| TomlTarget { + name: Some(package_name.to_string()), + path: Some(PathValue(lib.clone())), + ..TomlTarget::new() + }), + }; + + let lib = match lib { + Some(ref lib) => lib, + None => return Ok(None), + }; + + validate_has_name(lib, "library", "lib")?; + + let path = match (lib.path.as_ref(), inferred) { + (Some(path), _) => package_root.join(&path.0), + (None, Some(path)) => path, + (None, None) => { + let legacy_path = package_root.join("src").join(format!("{}.rs", lib.name())); + if edition < Edition::Edition2018 && legacy_path.exists() { + warnings.push(format!( + "path `{}` was erroneously implicitly accepted for library `{}`,\n\ + please rename the file to `src/lib.rs` or set lib.path in Cargo.toml", + legacy_path.display(), + lib.name() + )); + legacy_path + } else { + bail!( + "can't find library `{}`, \ + rename file to `src/lib.rs` or specify lib.path", + lib.name() + ) + } + } + }; + + // Per the Macros 1.1 RFC: + // + // > Initially if a crate is compiled with the proc-macro crate type + // > (and possibly others) it will forbid exporting any items in the + // > crate other than those functions tagged #[proc_macro_derive] and + // > those functions must also be placed at the crate root. + // + // A plugin requires exporting plugin_registrar so a crate cannot be + // both at once. 
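+    // Pick the crate types: an explicit `crate-type` list wins; otherwise `plugin`
+    // or `proc-macro` each imply a single crate type, and the default is a plain lib.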
+ let crate_types = match (lib.crate_types(), lib.plugin, lib.proc_macro()) { + (_, Some(true), Some(true)) => bail!("lib.plugin and lib.proc-macro cannot both be true"), + (Some(kinds), _, _) => kinds.iter().map(|s| LibKind::from_str(s)).collect(), + (None, Some(true), _) => vec![LibKind::Dylib], + (None, _, Some(true)) => vec![LibKind::ProcMacro], + (None, _, _) => vec![LibKind::Lib], + }; + + let mut target = Target::lib_target(&lib.name(), crate_types, path); + configure(lib, &mut target); + Ok(Some(target)) +} + +fn clean_bins( + toml_bins: Option<&Vec>, + package_root: &Path, + package_name: &str, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, + has_lib: bool, +) -> CargoResult> { + let inferred = inferred_bins(package_root, package_name); + + let bins = toml_targets_and_inferred( + toml_bins, + &inferred, + package_root, + autodiscover, + edition, + warnings, + "binary", + "bin", + "autobins", + ); + + for bin in &bins { + validate_has_name(bin, "binary", "bin")?; + + let name = bin.name(); + + if let Some(crate_types) = bin.crate_types() { + if !crate_types.is_empty() { + errors.push(format!( + "the target `{}` is a binary and can't have any \ + crate-types set (currently \"{}\")", + name, + crate_types.join(", ") + )); + } + } + + if bin.proc_macro() == Some(true) { + errors.push(format!( + "the target `{}` is a binary and can't have `proc-macro` \ + set `true`", + name + )); + } + + if compiler::is_bad_artifact_name(&name) { + bail!("the binary target name `{}` is forbidden", name) + } + } + + validate_unique_names(&bins, "binary")?; + + let mut result = Vec::new(); + for bin in &bins { + let path = target_path(bin, &inferred, "bin", package_root, edition, &mut |_| { + if let Some(legacy_path) = legacy_bin_path(package_root, &bin.name(), has_lib) { + warnings.push(format!( + "path `{}` was erroneously implicitly accepted for binary `{}`,\n\ + please set bin.path in Cargo.toml", + legacy_path.display(), + bin.name() + )); + Some(legacy_path) + } else { + None + } + }); + let path = match path { + Ok(path) => path, + Err(e) => bail!("{}", e), + }; + + let mut target = Target::bin_target(&bin.name(), path, bin.required_features.clone()); + configure(bin, &mut target); + result.push(target); + } + return Ok(result); + + fn legacy_bin_path(package_root: &Path, name: &str, has_lib: bool) -> Option { + if !has_lib { + let path = package_root.join("src").join(format!("{}.rs", name)); + if path.exists() { + return Some(path); + } + } + let path = package_root.join("src").join("main.rs"); + if path.exists() { + return Some(path); + } + + let path = package_root.join("src").join("bin").join("main.rs"); + if path.exists() { + return Some(path); + } + None + } +} + +fn clean_examples( + toml_examples: Option<&Vec>, + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let inferred = infer_from_directory(&package_root.join("examples")); + + let targets = clean_targets( + "example", + "example", + toml_examples, + &inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + "autoexamples", + )?; + + let mut result = Vec::new(); + for (path, toml) in targets { + let crate_types = match toml.crate_types() { + Some(kinds) => kinds.iter().map(|s| LibKind::from_str(s)).collect(), + None => Vec::new(), + }; + + let mut target = Target::example_target( + &toml.name(), + crate_types, + path, + toml.required_features.clone(), + ); + configure(&toml, &mut target); + 
result.push(target); + } + + Ok(result) +} + +fn clean_tests( + toml_tests: Option<&Vec>, + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let inferred = infer_from_directory(&package_root.join("tests")); + + let targets = clean_targets( + "test", + "test", + toml_tests, + &inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + "autotests", + )?; + + let mut result = Vec::new(); + for (path, toml) in targets { + let mut target = Target::test_target(&toml.name(), path, toml.required_features.clone()); + configure(&toml, &mut target); + result.push(target); + } + Ok(result) +} + +fn clean_benches( + toml_benches: Option<&Vec>, + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let mut legacy_warnings = vec![]; + + let targets = { + let mut legacy_bench_path = |bench: &TomlTarget| { + let legacy_path = package_root.join("src").join("bench.rs"); + if !(bench.name() == "bench" && legacy_path.exists()) { + return None; + } + legacy_warnings.push(format!( + "path `{}` was erroneously implicitly accepted for benchmark `{}`,\n\ + please set bench.path in Cargo.toml", + legacy_path.display(), + bench.name() + )); + Some(legacy_path) + }; + + let inferred = infer_from_directory(&package_root.join("benches")); + + clean_targets_with_legacy_path( + "benchmark", + "bench", + toml_benches, + &inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + &mut legacy_bench_path, + "autobenches", + )? + }; + + warnings.append(&mut legacy_warnings); + + let mut result = Vec::new(); + for (path, toml) in targets { + let mut target = Target::bench_target(&toml.name(), path, toml.required_features.clone()); + configure(&toml, &mut target); + result.push(target); + } + + Ok(result) +} + +fn clean_targets( + target_kind_human: &str, + target_kind: &str, + toml_targets: Option<&Vec>, + inferred: &[(String, PathBuf)], + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, + autodiscover_flag_name: &str, +) -> CargoResult> { + clean_targets_with_legacy_path( + target_kind_human, + target_kind, + toml_targets, + inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + &mut |_| None, + autodiscover_flag_name, + ) +} + +fn clean_targets_with_legacy_path( + target_kind_human: &str, + target_kind: &str, + toml_targets: Option<&Vec>, + inferred: &[(String, PathBuf)], + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, + legacy_path: &mut FnMut(&TomlTarget) -> Option, + autodiscover_flag_name: &str, +) -> CargoResult> { + let toml_targets = toml_targets_and_inferred( + toml_targets, + inferred, + package_root, + autodiscover, + edition, + warnings, + target_kind_human, + target_kind, + autodiscover_flag_name, + ); + + for target in &toml_targets { + validate_has_name(target, target_kind_human, target_kind)?; + } + + validate_unique_names(&toml_targets, target_kind)?; + let mut result = Vec::new(); + for target in toml_targets { + let path = target_path(&target, inferred, target_kind, package_root, edition, legacy_path); + let path = match path { + Ok(path) => path, + Err(e) => { + errors.push(e); + continue; + } + }; + result.push((path, target)); + } + Ok(result) +} + +fn inferred_lib(package_root: &Path) -> Option { + let lib = package_root.join("src").join("lib.rs"); + if 
fs::metadata(&lib).is_ok() { + Some(lib) + } else { + None + } +} + +fn inferred_bins(package_root: &Path, package_name: &str) -> Vec<(String, PathBuf)> { + let main = package_root.join("src").join("main.rs"); + let mut result = Vec::new(); + if main.exists() { + result.push((package_name.to_string(), main)); + } + result.extend(infer_from_directory(&package_root.join("src").join("bin"))); + + result +} + +fn infer_from_directory(directory: &Path) -> Vec<(String, PathBuf)> { + let entries = match fs::read_dir(directory) { + Err(_) => return Vec::new(), + Ok(dir) => dir, + }; + + entries + .filter_map(|e| e.ok()) + .filter(is_not_dotfile) + .filter_map(|d| infer_any(&d)) + .collect() +} + +fn infer_any(entry: &DirEntry) -> Option<(String, PathBuf)> { + if entry.path().extension().and_then(|p| p.to_str()) == Some("rs") { + infer_file(entry) + } else if entry.file_type().map(|t| t.is_dir()).ok() == Some(true) { + infer_subdirectory(entry) + } else { + None + } +} + +fn infer_file(entry: &DirEntry) -> Option<(String, PathBuf)> { + let path = entry.path(); + path.file_stem() + .and_then(|p| p.to_str()) + .map(|p| (p.to_owned(), path.clone())) +} + +fn infer_subdirectory(entry: &DirEntry) -> Option<(String, PathBuf)> { + let path = entry.path(); + let main = path.join("main.rs"); + let name = path.file_name().and_then(|n| n.to_str()); + match (name, main.exists()) { + (Some(name), true) => Some((name.to_owned(), main)), + _ => None, + } +} + +fn is_not_dotfile(entry: &DirEntry) -> bool { + entry.file_name().to_str().map(|s| s.starts_with('.')) == Some(false) +} + +fn toml_targets_and_inferred( + toml_targets: Option<&Vec>, + inferred: &[(String, PathBuf)], + package_root: &Path, + autodiscover: Option, + edition: Edition, + warnings: &mut Vec, + target_kind_human: &str, + target_kind: &str, + autodiscover_flag_name: &str, +) -> Vec { + let inferred_targets = inferred_to_toml_targets(inferred); + match toml_targets { + None => inferred_targets, + Some(targets) => { + let mut targets = targets.clone(); + + let target_path = + |target: &TomlTarget| target.path.clone().map(|p| package_root.join(p.0)); + + let mut seen_names = HashSet::new(); + let mut seen_paths = HashSet::new(); + for target in targets.iter() { + seen_names.insert(target.name.clone()); + seen_paths.insert(target_path(target)); + } + + let mut rem_targets = vec![]; + for target in inferred_targets { + if !seen_names.contains(&target.name) && !seen_paths.contains(&target_path(&target)) + { + rem_targets.push(target); + } + } + + let autodiscover = match autodiscover { + Some(autodiscover) => autodiscover, + None => match edition { + Edition::Edition2018 => true, + Edition::Edition2015 => { + if !rem_targets.is_empty() { + let mut rem_targets_str = String::new(); + for t in rem_targets.iter() { + if let Some(p) = t.path.clone() { + rem_targets_str.push_str(&format!("* {}\n", p.0.display())) + } + } + warnings.push(format!( + "\ +An explicit [[{section}]] section is specified in Cargo.toml which currently +disables Cargo from automatically inferring other {target_kind_human} targets. +This inference behavior will change in the Rust 2018 edition and the following +files will be included as a {target_kind_human} target: + +{rem_targets_str} +This is likely to break cargo build or cargo test as these files may not be +ready to be compiled as a {target_kind_human} target today. You can future-proof yourself +and disable this warning by adding `{autodiscover_flag_name} = false` to your [package] +section. 
You may also move the files to a location where Cargo would not +automatically infer them to be a target, such as in subfolders. + +For more information on this warning you can consult +https://github.com/rust-lang/cargo/issues/5330", + section = target_kind, + target_kind_human = target_kind_human, + rem_targets_str = rem_targets_str, + autodiscover_flag_name = autodiscover_flag_name, + )); + }; + false + } + }, + }; + + if autodiscover { + targets.append(&mut rem_targets); + } + + targets + } + } +} + +fn inferred_to_toml_targets(inferred: &[(String, PathBuf)]) -> Vec { + inferred + .iter() + .map(|&(ref name, ref path)| TomlTarget { + name: Some(name.clone()), + path: Some(PathValue(path.clone())), + ..TomlTarget::new() + }) + .collect() +} + +fn validate_has_name( + target: &TomlTarget, + target_kind_human: &str, + target_kind: &str, +) -> CargoResult<()> { + match target.name { + Some(ref name) => if name.trim().is_empty() { + bail!("{} target names cannot be empty", target_kind_human) + }, + None => bail!( + "{} target {}.name is required", + target_kind_human, + target_kind + ), + } + + Ok(()) +} + +/// Will check a list of toml targets, and make sure the target names are unique within a vector. +fn validate_unique_names(targets: &[TomlTarget], target_kind: &str) -> CargoResult<()> { + let mut seen = HashSet::new(); + for name in targets.iter().map(|e| e.name()) { + if !seen.insert(name.clone()) { + bail!( + "found duplicate {target_kind} name {name}, \ + but all {target_kind} targets must have a unique name", + target_kind = target_kind, + name = name + ); + } + } + Ok(()) +} + +fn configure(toml: &TomlTarget, target: &mut Target) { + let t2 = target.clone(); + target + .set_tested(toml.test.unwrap_or_else(|| t2.tested())) + .set_doc(toml.doc.unwrap_or_else(|| t2.documented())) + .set_doctest(toml.doctest.unwrap_or_else(|| t2.doctested())) + .set_benched(toml.bench.unwrap_or_else(|| t2.benched())) + .set_harness(toml.harness.unwrap_or_else(|| t2.harness())) + .set_for_host(match (toml.plugin, toml.proc_macro()) { + (None, None) => t2.for_host(), + (Some(true), _) | (_, Some(true)) => true, + (Some(false), _) | (_, Some(false)) => false, + }); +} + +fn target_path( + target: &TomlTarget, + inferred: &[(String, PathBuf)], + target_kind: &str, + package_root: &Path, + edition: Edition, + legacy_path: &mut FnMut(&TomlTarget) -> Option, +) -> Result { + if let Some(ref path) = target.path { + // Should we verify that this path exists here? 
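+        // (As written, no: the joined path is returned unchecked, and a missing
+        // file only surfaces later, when the target is actually compiled.)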
+ return Ok(package_root.join(&path.0)); + } + let name = target.name(); + + let mut matching = inferred + .iter() + .filter(|&&(ref n, _)| n == &name) + .map(|&(_, ref p)| p.clone()); + + let first = matching.next(); + let second = matching.next(); + match (first, second) { + (Some(path), None) => Ok(path), + (None, None) | (Some(_), Some(_)) => { + if edition < Edition::Edition2018 { + if let Some(path) = legacy_path(target) { + return Ok(path); + } + } + Err(format!( + "can't find `{name}` {target_kind}, specify {target_kind}.path", + name = name, + target_kind = target_kind + )) + } + (None, Some(_)) => unreachable!(), + } +} diff --git a/src/cargo/util/vcs.rs b/src/cargo/util/vcs.rs new file mode 100644 index 000000000..1eb447a59 --- /dev/null +++ b/src/cargo/util/vcs.rs @@ -0,0 +1,78 @@ +use std::path::Path; +use std::fs::create_dir; + +use git2; + +use util::{process, CargoResult}; + +pub struct HgRepo; +pub struct GitRepo; +pub struct PijulRepo; +pub struct FossilRepo; + +impl GitRepo { + pub fn init(path: &Path, _: &Path) -> CargoResult { + git2::Repository::init(path)?; + Ok(GitRepo) + } + pub fn discover(path: &Path, _: &Path) -> Result { + git2::Repository::discover(path) + } +} + +impl HgRepo { + pub fn init(path: &Path, cwd: &Path) -> CargoResult { + process("hg").cwd(cwd).arg("init").arg(path).exec()?; + Ok(HgRepo) + } + pub fn discover(path: &Path, cwd: &Path) -> CargoResult { + process("hg") + .cwd(cwd) + .arg("root") + .cwd(path) + .exec_with_output()?; + Ok(HgRepo) + } +} + +impl PijulRepo { + pub fn init(path: &Path, cwd: &Path) -> CargoResult { + process("pijul").cwd(cwd).arg("init").arg(path).exec()?; + Ok(PijulRepo) + } +} + +impl FossilRepo { + pub fn init(path: &Path, cwd: &Path) -> CargoResult { + // fossil doesn't create the directory so we'll do that first + create_dir(path)?; + + // set up the paths we'll use + let db_fname = ".fossil"; + let mut db_path = path.to_owned(); + db_path.push(db_fname); + + // then create the fossil DB in that location + process("fossil").cwd(cwd).arg("init").arg(&db_path).exec()?; + + // open it in that new directory + process("fossil") + .cwd(&path) + .arg("open") + .arg(db_fname) + .exec()?; + + // set `target` as ignoreable and cleanable + process("fossil") + .cwd(cwd) + .arg("settings") + .arg("ignore-glob") + .arg("target"); + process("fossil") + .cwd(cwd) + .arg("settings") + .arg("clean-glob") + .arg("target"); + Ok(FossilRepo) + } +} diff --git a/src/crates-io/Cargo.toml b/src/crates-io/Cargo.toml new file mode 100644 index 000000000..2c8aa8f34 --- /dev/null +++ b/src/crates-io/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "crates-io" +version = "0.16.0" +authors = ["Alex Crichton "] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/cargo" +description = """ +Helpers for interacting with crates.io +""" + +[lib] +name = "crates_io" +path = "lib.rs" + +[dependencies] +curl = "0.4" +failure = "0.1.1" +serde = "1.0" +serde_derive = "1.0" +serde_json = "1.0" +url = "1.0" diff --git a/src/crates-io/LICENSE-APACHE b/src/crates-io/LICENSE-APACHE new file mode 120000 index 000000000..1cd601d0a --- /dev/null +++ b/src/crates-io/LICENSE-APACHE @@ -0,0 +1 @@ +../../LICENSE-APACHE \ No newline at end of file diff --git a/src/crates-io/LICENSE-MIT b/src/crates-io/LICENSE-MIT new file mode 120000 index 000000000..b2cfbdc7b --- /dev/null +++ b/src/crates-io/LICENSE-MIT @@ -0,0 +1 @@ +../../LICENSE-MIT \ No newline at end of file diff --git a/src/crates-io/lib.rs b/src/crates-io/lib.rs new file mode 100644 index 
000000000..11e319334 --- /dev/null +++ b/src/crates-io/lib.rs @@ -0,0 +1,335 @@ +#![allow(unknown_lints)] + +extern crate curl; +#[macro_use] +extern crate failure; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; +extern crate url; + +use std::collections::BTreeMap; +use std::fs::File; +use std::io::prelude::*; +use std::io::Cursor; + +use curl::easy::{Easy, List}; +use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET}; + +pub type Result = std::result::Result; + +pub struct Registry { + host: String, + token: Option, + handle: Easy, +} + +#[derive(PartialEq, Clone, Copy)] +pub enum Auth { + Authorized, + Unauthorized, +} + +#[derive(Deserialize)] +pub struct Crate { + pub name: String, + pub description: Option, + pub max_version: String, +} + +#[derive(Serialize)] +pub struct NewCrate { + pub name: String, + pub vers: String, + pub deps: Vec, + pub features: BTreeMap>, + pub authors: Vec, + pub description: Option, + pub documentation: Option, + pub homepage: Option, + pub readme: Option, + pub readme_file: Option, + pub keywords: Vec, + pub categories: Vec, + pub license: Option, + pub license_file: Option, + pub repository: Option, + pub badges: BTreeMap>, + #[serde(default)] pub links: Option, +} + +#[derive(Serialize)] +pub struct NewCrateDependency { + pub optional: bool, + pub default_features: bool, + pub name: String, + pub features: Vec, + pub version_req: String, + pub target: Option, + pub kind: String, + #[serde(skip_serializing_if = "Option::is_none")] pub registry: Option, +} + +#[derive(Deserialize)] +pub struct User { + pub id: u32, + pub login: String, + pub avatar: Option, + pub email: Option, + pub name: Option, +} + +pub struct Warnings { + pub invalid_categories: Vec, + pub invalid_badges: Vec, +} + +#[derive(Deserialize)] +struct R { + ok: bool, +} +#[derive(Deserialize)] +struct OwnerResponse { + ok: bool, + msg: String, +} +#[derive(Deserialize)] +struct ApiErrorList { + errors: Vec, +} +#[derive(Deserialize)] +struct ApiError { + detail: String, +} +#[derive(Serialize)] +struct OwnersReq<'a> { + users: &'a [&'a str], +} +#[derive(Deserialize)] +struct Users { + users: Vec, +} +#[derive(Deserialize)] +struct TotalCrates { + total: u32, +} +#[derive(Deserialize)] +struct Crates { + crates: Vec, + meta: TotalCrates, +} +impl Registry { + pub fn new(host: String, token: Option) -> Registry { + Registry::new_handle(host, token, Easy::new()) + } + + pub fn new_handle(host: String, token: Option, handle: Easy) -> Registry { + Registry { + host, + token, + handle, + } + } + + pub fn add_owners(&mut self, krate: &str, owners: &[&str]) -> Result { + let body = serde_json::to_string(&OwnersReq { users: owners })?; + let body = self.put(format!("/crates/{}/owners", krate), body.as_bytes())?; + assert!(serde_json::from_str::(&body)?.ok); + Ok(serde_json::from_str::(&body)?.msg) + } + + pub fn remove_owners(&mut self, krate: &str, owners: &[&str]) -> Result<()> { + let body = serde_json::to_string(&OwnersReq { users: owners })?; + let body = self.delete(format!("/crates/{}/owners", krate), Some(body.as_bytes()))?; + assert!(serde_json::from_str::(&body)?.ok); + Ok(()) + } + + pub fn list_owners(&mut self, krate: &str) -> Result> { + let body = self.get(format!("/crates/{}/owners", krate))?; + Ok(serde_json::from_str::(&body)?.users) + } + + pub fn publish(&mut self, krate: &NewCrate, tarball: &File) -> Result { + let json = serde_json::to_string(krate)?; + // Prepare the body. 
The format of the upload request is: + // + // + // (metadata for the package) + // + // + let stat = tarball.metadata()?; + let header = { + let mut w = Vec::new(); + w.extend( + [ + (json.len() >> 0) as u8, + (json.len() >> 8) as u8, + (json.len() >> 16) as u8, + (json.len() >> 24) as u8, + ].iter().cloned(), + ); + w.extend(json.as_bytes().iter().cloned()); + w.extend( + [ + (stat.len() >> 0) as u8, + (stat.len() >> 8) as u8, + (stat.len() >> 16) as u8, + (stat.len() >> 24) as u8, + ].iter().cloned(), + ); + w + }; + let size = stat.len() as usize + header.len(); + let mut body = Cursor::new(header).chain(tarball); + + let url = format!("{}/api/v1/crates/new", self.host); + + let token = match self.token.as_ref() { + Some(s) => s, + None => bail!("no upload token found, please run `cargo login`"), + }; + self.handle.put(true)?; + self.handle.url(&url)?; + self.handle.in_filesize(size as u64)?; + let mut headers = List::new(); + headers.append("Accept: application/json")?; + headers.append(&format!("Authorization: {}", token))?; + self.handle.http_headers(headers)?; + + let body = handle(&mut self.handle, &mut |buf| body.read(buf).unwrap_or(0))?; + + let response = if body.is_empty() { + "{}".parse()? + } else { + body.parse::()? + }; + + let invalid_categories: Vec = response + .get("warnings") + .and_then(|j| j.get("invalid_categories")) + .and_then(|j| j.as_array()) + .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect()) + .unwrap_or_else(Vec::new); + + let invalid_badges: Vec = response + .get("warnings") + .and_then(|j| j.get("invalid_badges")) + .and_then(|j| j.as_array()) + .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect()) + .unwrap_or_else(Vec::new); + + Ok(Warnings { + invalid_categories, + invalid_badges, + }) + } + + pub fn search(&mut self, query: &str, limit: u32) -> Result<(Vec, u32)> { + let formatted_query = percent_encode(query.as_bytes(), QUERY_ENCODE_SET); + let body = self.req( + format!("/crates?q={}&per_page={}", formatted_query, limit), + None, + Auth::Unauthorized, + )?; + + let crates = serde_json::from_str::(&body)?; + Ok((crates.crates, crates.meta.total)) + } + + pub fn yank(&mut self, krate: &str, version: &str) -> Result<()> { + let body = self.delete(format!("/crates/{}/{}/yank", krate, version), None)?; + assert!(serde_json::from_str::(&body)?.ok); + Ok(()) + } + + pub fn unyank(&mut self, krate: &str, version: &str) -> Result<()> { + let body = self.put(format!("/crates/{}/{}/unyank", krate, version), &[])?; + assert!(serde_json::from_str::(&body)?.ok); + Ok(()) + } + + fn put(&mut self, path: String, b: &[u8]) -> Result { + self.handle.put(true)?; + self.req(path, Some(b), Auth::Authorized) + } + + fn get(&mut self, path: String) -> Result { + self.handle.get(true)?; + self.req(path, None, Auth::Authorized) + } + + fn delete(&mut self, path: String, b: Option<&[u8]>) -> Result { + self.handle.custom_request("DELETE")?; + self.req(path, b, Auth::Authorized) + } + + fn req(&mut self, path: String, body: Option<&[u8]>, authorized: Auth) -> Result { + self.handle.url(&format!("{}/api/v1{}", self.host, path))?; + let mut headers = List::new(); + headers.append("Accept: application/json")?; + headers.append("Content-Type: application/json")?; + + if authorized == Auth::Authorized { + let token = match self.token.as_ref() { + Some(s) => s, + None => bail!("no upload token found, please run `cargo login`"), + }; + headers.append(&format!("Authorization: {}", token))?; + } + self.handle.http_headers(headers)?; + match body { + 
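+            // Stream the caller-provided body through curl's read callback;
+            // with no body, send an empty payload.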
Some(mut body) => { + self.handle.upload(true)?; + self.handle.in_filesize(body.len() as u64)?; + handle(&mut self.handle, &mut |buf| body.read(buf).unwrap_or(0)) + } + None => handle(&mut self.handle, &mut |_| 0), + } + } +} + +fn handle(handle: &mut Easy, read: &mut FnMut(&mut [u8]) -> usize) -> Result { + let mut headers = Vec::new(); + let mut body = Vec::new(); + { + let mut handle = handle.transfer(); + handle.read_function(|buf| Ok(read(buf)))?; + handle.write_function(|data| { + body.extend_from_slice(data); + Ok(data.len()) + })?; + handle.header_function(|data| { + headers.push(String::from_utf8_lossy(data).into_owned()); + true + })?; + handle.perform()?; + } + + match handle.response_code()? { + 0 => {} // file upload url sometimes + 200 => {} + 403 => bail!("received 403 unauthorized response code"), + 404 => bail!("received 404 not found response code"), + code => bail!( + "failed to get a 200 OK response, got {}\n\ + headers:\n\ + \t{}\n\ + body:\n\ + {}", + code, + headers.join("\n\t"), + String::from_utf8_lossy(&body) + ), + } + + let body = match String::from_utf8(body) { + Ok(body) => body, + Err(..) => bail!("response body was not valid utf-8"), + }; + if let Ok(errors) = serde_json::from_str::(&body) { + let errors = errors.errors.into_iter().map(|s| s.detail); + bail!("api errors: {}", errors.collect::>().join(", ")); + } + Ok(body) +} diff --git a/src/doc/README.md b/src/doc/README.md new file mode 100644 index 000000000..983c96693 --- /dev/null +++ b/src/doc/README.md @@ -0,0 +1,47 @@ +# The Cargo Book + + +### Requirements + +Building the book requires [mdBook]. To get it: + +[mdBook]: https://github.com/azerupi/mdBook + +```console +$ cargo install mdbook +``` + +### Building + +To build the book: + +```console +$ mdbook build +``` + +The output will be in the `book` subdirectory. To check it out, open it in +your web browser. + +_Firefox:_ +```console +$ firefox book/index.html # Linux +$ open -a "Firefox" book/index.html # OS X +$ Start-Process "firefox.exe" .\book\index.html # Windows (PowerShell) +$ start firefox.exe .\book\index.html # Windows (Cmd) +``` + +_Chrome:_ +```console +$ google-chrome book/index.html # Linux +$ open -a "Google Chrome" book/index.html # OS X +$ Start-Process "chrome.exe" .\book\index.html # Windows (PowerShell) +$ start chrome.exe .\book\index.html # Windows (Cmd) +``` + + +## Contributing + +Given that the book is still in a draft state, we'd love your help! Please feel free to open +issues about anything, and send in PRs for things you'd like to fix or change. If your change is +large, please open an issue first, so we can make sure that it's something we'd accept before you +go through the work of getting a PR together. 
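+
+If your mdBook is recent enough to include the `serve` subcommand (an
+assumption worth checking with `mdbook --help`), it can also rebuild and
+serve the book locally while you edit:
+
+```console
+$ mdbook serve
+```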
diff --git a/src/doc/book.toml b/src/doc/book.toml new file mode 100644 index 000000000..1f21e1e2e --- /dev/null +++ b/src/doc/book.toml @@ -0,0 +1,2 @@ +title = "The Cargo Book" +author = "Alex Crichton, Steve Klabnik and Carol Nichols, with Contributions from the Rust Community" diff --git a/src/doc/src/SUMMARY.md b/src/doc/src/SUMMARY.md new file mode 100644 index 000000000..5330684da --- /dev/null +++ b/src/doc/src/SUMMARY.md @@ -0,0 +1,32 @@ +# Summary + +[Introduction](index.md) + +* [Getting Started](getting-started/index.md) + * [Installation](getting-started/installation.md) + * [First Steps with Cargo](getting-started/first-steps.md) + +* [Cargo Guide](guide/index.md) + * [Why Cargo Exists](guide/why-cargo-exists.md) + * [Creating a New Project](guide/creating-a-new-project.md) + * [Working on an Existing Project](guide/working-on-an-existing-project.md) + * [Dependencies](guide/dependencies.md) + * [Project Layout](guide/project-layout.md) + * [Cargo.toml vs Cargo.lock](guide/cargo-toml-vs-cargo-lock.md) + * [Tests](guide/tests.md) + * [Continuous Integration](guide/continuous-integration.md) + * [Build Cache](guide/build-cache.md) + +* [Cargo Reference](reference/index.md) + * [Specifying Dependencies](reference/specifying-dependencies.md) + * [The Manifest Format](reference/manifest.md) + * [Configuration](reference/config.md) + * [Environment Variables](reference/environment-variables.md) + * [Build Scripts](reference/build-scripts.md) + * [Publishing on crates.io](reference/publishing.md) + * [Package ID Specifications](reference/pkgid-spec.md) + * [Source Replacement](reference/source-replacement.md) + * [External Tools](reference/external-tools.md) + * [Unstable Features](reference/unstable.md) + +* [FAQ](faq.md) diff --git a/src/doc/src/faq.md b/src/doc/src/faq.md new file mode 100644 index 000000000..7ee2d2b7c --- /dev/null +++ b/src/doc/src/faq.md @@ -0,0 +1,193 @@ +## Frequently Asked Questions + +### Is the plan to use GitHub as a package repository? + +No. The plan for Cargo is to use [crates.io], like npm or Rubygems do with +npmjs.org and rubygems.org. + +We plan to support git repositories as a source of packages forever, +because they can be used for early development and temporary patches, +even when people use the registry as the primary source of packages. + +### Why build crates.io rather than use GitHub as a registry? + +We think that it’s very important to support multiple ways to download +packages, including downloading from GitHub and copying packages into +your project itself. + +That said, we think that [crates.io] offers a number of important benefits, and +will likely become the primary way that people download packages in Cargo. + +For precedent, both Node.js’s [npm][1] and Ruby’s [bundler][2] support both a +central registry model as well as a Git-based model, and most packages +are downloaded through the registry in those ecosystems, with an +important minority of packages making use of git-based packages. + +[1]: https://www.npmjs.org +[2]: https://bundler.io + +Some of the advantages that make a central registry popular in other +languages include: + +* **Discoverability**. A central registry provides an easy place to look + for existing packages. Combined with tagging, this also makes it + possible for a registry to provide ecosystem-wide information, such as a + list of the most popular or most-depended-on packages. +* **Speed**. 
A central registry makes it possible to easily fetch just
+the metadata for packages quickly and efficiently, and then to
+efficiently download just the published package, and not other bloat
+that happens to exist in the repository. This adds up to a significant
+improvement in the speed of dependency resolution and fetching. As
+dependency graphs scale up, downloading all of the git repositories bogs
+down fast. Also remember that not everybody has a high-speed,
+low-latency Internet connection.
+
+### Will Cargo work with C code (or other languages)?
+
+Yes!
+
+Cargo handles compiling Rust code, but we know that many Rust projects
+link against C code. We also know that there are decades of tooling
+built up around compiling languages other than Rust.
+
+Our solution: Cargo allows a package to [specify a script](reference/build-scripts.html)
+(written in Rust) to run before invoking `rustc`. Rust is leveraged to
+implement platform-specific configuration and to factor out common build
+functionality among packages.
+
+### Can Cargo be used inside of `make` (or `ninja`, or ...)?
+
+Indeed. While we intend Cargo to be useful as a standalone way to
+compile Rust projects at the top level, we know that some people will
+want to invoke Cargo from other build tools.
+
+We have designed Cargo to work well in those contexts, paying attention
+to things like error codes and machine-readable output modes. We still
+have some work to do on those fronts, but using Cargo in the context of
+conventional scripts is something we designed for from the beginning and
+will continue to prioritize.
+
+### Does Cargo handle multi-platform projects or cross-compilation?
+
+Rust itself provides facilities for configuring sections of code based
+on the platform. Cargo also supports [platform-specific
+dependencies][target-deps], and we plan to support more per-platform
+configuration in `Cargo.toml` in the future.
+
+[target-deps]: reference/specifying-dependencies.html#platform-specific-dependencies
+
+In the longer term, we're looking at ways to conveniently cross-compile
+projects using Cargo.
+
+### Does Cargo support environments, like `production` or `test`?
+
+We support environments through the use of [profiles][profile], which provide:
+
+[profile]: reference/manifest.html#the-profile-sections
+
+* environment-specific flags (like `-g --opt-level=0` for development
+  and `--opt-level=3` for production).
+* environment-specific dependencies (like `hamcrest` for test assertions).
+* environment-specific `#[cfg]`
+* a `cargo test` command
+
+### Does Cargo work on Windows?
+
+Yes!
+
+All commits to Cargo are required to pass the local test suite on Windows.
+If, however, you find a Windows issue, we consider it a bug, so [please file an
+issue][3].
+
+[3]: https://github.com/rust-lang/cargo/issues
+
+### Why do binaries have `Cargo.lock` in version control, but not libraries?
+
+The purpose of a `Cargo.lock` is to describe the state of the world at the time
+of a successful build. It is then used to provide deterministic builds across
+whatever machine is building the project by ensuring that the exact same
+dependencies are being compiled.
+
+This property is most desirable for applications and projects which are at the
+very end of the dependency chain (binaries). As a result, it is recommended that
+all binaries check in their `Cargo.lock`.
+
+For libraries the situation is somewhat different. A library is not only used by
+the library developers, but also by any downstream consumers of the library.
+Users dependent on the library will not inspect the library’s `Cargo.lock` (even if it
+exists). This is precisely because a library should **not** be deterministically
+recompiled for all users of the library.
+
+If a library ends up being used transitively by several dependencies, it’s
+likely that just a single copy of the library is desired (based on semver
+compatibility). If all libraries were to check in their `Cargo.lock`, then
+multiple copies of the library would be used, and perhaps even a version
+conflict.
+
+In other words, libraries specify semver requirements for their dependencies but
+cannot see the full picture. Only end products like binaries have a full
+picture to decide what versions of dependencies should be used.
+
+### Can libraries use `*` as a version for their dependencies?
+
+**As of January 22nd, 2016, [crates.io] rejects all packages (not just libraries)
+with wildcard dependency constraints.**
+
+While libraries _can_, strictly speaking, they should not. A version requirement
+of `*` says “This will work with every version ever,” which is never going
+to be true. Libraries should always specify the range that they do work with,
+even if it’s something as general as “every 1.x.y version.”
+
+### Why `Cargo.toml`?
+
+Because the manifest is one of the most frequent points of interaction with
+Cargo, the question of why the configuration file is named `Cargo.toml` arises
+from time to time. The leading capital-`C` was chosen to ensure that the
+manifest is grouped with other similar configuration files in directory
+listings. Sorting files often puts capital letters before lowercase letters,
+ensuring files like `Makefile` and `Cargo.toml` are placed together. The
+trailing `.toml` was chosen to emphasize the fact that the file is in the
+[TOML configuration format](https://github.com/toml-lang/toml).
+
+Cargo does not allow other names such as `cargo.toml` or `Cargofile`, so that a
+Cargo repository is always easy to identify. Allowing many possible names has
+historically led to confusion where one case was handled but others were
+accidentally forgotten.
+
+[crates.io]: https://crates.io/
+
+### How can Cargo work offline?
+
+Cargo is often used in situations with limited or no network access, such as
+airplanes, CI environments, or embedded in large production deployments. Users
+are often surprised when Cargo attempts to fetch resources from the network, and
+hence the request for Cargo to work offline comes up frequently.
+
+Cargo, at its heart, will not attempt to access the network unless told to do
+so. That is, if no crate comes from crates.io, a git repository, or some other
+network location, Cargo will never attempt to make a network connection. As a
+result, if Cargo attempts to touch the network, it’s because it needs to
+fetch a required resource.
+
+Cargo is also quite aggressive about caching information to minimize the amount
+of network activity. For example, if `cargo build` (or an equivalent) is run to
+completion, then the next `cargo build` is guaranteed not to touch the network
+so long as `Cargo.toml` has not been modified in the meantime. This avoidance
+of the network boils down to a `Cargo.lock` existing and a populated cache of
+the crates reflected in the lock file. If either of these components is
+missing, then it is required for the build to succeed and must be fetched
+remotely.
+
+As of Rust 1.11.0, Cargo understands a new flag, `--frozen`, which is an
+assertion that it shouldn’t touch the network.
+When passed, Cargo will
+immediately return an error if it would otherwise attempt a network request.
+The error should include contextual information about why the network request
+is being made in the first place, to help with debugging. Note that this flag
+*does not change the behavior of Cargo*; it simply asserts that Cargo
+shouldn’t touch the network, because a previous command has already been run
+to ensure that network activity shouldn’t be necessary.
+
+For more information about vendoring, see documentation on [source
+replacement][replace].
+
+[replace]: reference/source-replacement.html
diff --git a/src/doc/src/getting-started/first-steps.md b/src/doc/src/getting-started/first-steps.md
new file mode 100644
index 000000000..3a0bad356
--- /dev/null
+++ b/src/doc/src/getting-started/first-steps.md
@@ -0,0 +1,70 @@
+## First Steps with Cargo
+
+To start a new project with Cargo, use `cargo new`:
+
+```console
+$ cargo new hello_world --bin
+```
+
+We’re passing `--bin` because we’re making a binary program: if we
+were making a library, we’d pass `--lib`.
+
+Let’s check out what Cargo has generated for us:
+
+```console
+$ cd hello_world
+$ tree .
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+1 directory, 2 files
+```
+
+This is all we need to get started. First, let’s check out `Cargo.toml`:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+```
+
+This is called a **manifest**, and it contains all of the metadata that Cargo
+needs to compile your project.
+
+Here’s what’s in `src/main.rs`:
+
+```rust
+fn main() {
+    println!("Hello, world!");
+}
+```
+
+Cargo generated a “hello world” for us. Let’s compile it:
+
+```console
+$ cargo build
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+And then run it:
+
+```console
+$ ./target/debug/hello_world
+Hello, world!
+```
+
+We can also use `cargo run` to compile and then run it, all in one step:
+
+```console
+$ cargo run
+     Fresh hello_world v0.1.0 (file:///path/to/project/hello_world)
+   Running `target/debug/hello_world`
+Hello, world!
+```
+
+### Going further
+
+For more details on using Cargo, check out the [Cargo Guide](guide/index.html).
diff --git a/src/doc/src/getting-started/index.md b/src/doc/src/getting-started/index.md
new file mode 100644
index 000000000..22a7315cf
--- /dev/null
+++ b/src/doc/src/getting-started/index.md
@@ -0,0 +1,6 @@
+## Getting Started
+
+To get started with Cargo, install Cargo (and Rust) and set up your first crate.
+
+* [Installation](getting-started/installation.html)
+* [First steps with Cargo](getting-started/first-steps.html)
diff --git a/src/doc/src/getting-started/installation.md b/src/doc/src/getting-started/installation.md
new file mode 100644
index 000000000..186c9daa5
--- /dev/null
+++ b/src/doc/src/getting-started/installation.md
@@ -0,0 +1,37 @@
+## Installation
+
+### Install Rust and Cargo
+
+The easiest way to get Cargo is to install the current stable release of [Rust]
+by using `rustup`.
+
+On Linux and macOS systems, this is done as follows:
+
+```console
+$ curl -sSf https://static.rust-lang.org/rustup.sh | sh
+```
+
+This downloads a script and starts the installation. If everything goes well,
+you’ll see this appear:
+
+```console
+Rust is installed now. Great!
+```
+
+On Windows, download and run [rustup-init.exe]. It will start the installation
+in a console and present the above message on success.
+
+After this, you can use the `rustup` command to also install the `beta` or
+`nightly` channels for Rust and Cargo.
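+
+For example, here is a quick sketch (assuming the `rustup` installation from
+above) of adding the nightly channel and doing a one-off build with it:
+
+```console
+$ rustup toolchain install nightly
+$ rustup run nightly cargo build
+```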
+
+For other installation options and information, visit the
+[install][install-rust] page of the Rust website.
+
+### Build and Install Cargo from Source
+
+Alternatively, you can [build Cargo from source][compiling-from-source].
+
+[rust]: https://www.rust-lang.org/
+[rustup-init.exe]: https://win.rustup.rs/
+[install-rust]: https://www.rust-lang.org/install.html
+[compiling-from-source]: https://github.com/rust-lang/cargo#compiling-from-source
diff --git a/src/doc/src/guide/build-cache.md b/src/doc/src/guide/build-cache.md
new file mode 100644
index 000000000..d253b8acc
--- /dev/null
+++ b/src/doc/src/guide/build-cache.md
@@ -0,0 +1,14 @@
+## Build cache
+
+Cargo shares build artifacts among all the packages of a single workspace.
+Today, Cargo does not share build results across different workspaces, but
+a similar result can be achieved by using a third-party tool, [sccache].
+
+To set up `sccache`, install it with `cargo install sccache` and set the
+`RUSTC_WRAPPER` environment variable to `sccache` before invoking Cargo.
+If you use bash, it makes sense to add `export RUSTC_WRAPPER=sccache` to
+`.bashrc`. Refer to the sccache documentation for more details.
+
+[sccache]: https://github.com/mozilla/sccache
diff --git a/src/doc/src/guide/cargo-toml-vs-cargo-lock.md b/src/doc/src/guide/cargo-toml-vs-cargo-lock.md
new file mode 100644
index 000000000..66d52459c
--- /dev/null
+++ b/src/doc/src/guide/cargo-toml-vs-cargo-lock.md
@@ -0,0 +1,103 @@
+## Cargo.toml vs Cargo.lock
+
+`Cargo.toml` and `Cargo.lock` serve two different purposes. Before we talk
+about them, here’s a summary:
+
+* `Cargo.toml` is about describing your dependencies in a broad sense, and is
+  written by you.
+* `Cargo.lock` contains exact information about your dependencies. It is
+  maintained by Cargo and should not be manually edited.
+
+If you’re building a library that other projects will depend on, put
+`Cargo.lock` in your `.gitignore`. If you’re building an executable like a
+command-line tool or an application, check `Cargo.lock` into `git`. If you're
+curious about why that is, see ["Why do binaries have `Cargo.lock` in version
+control, but not libraries?" in the
+FAQ](faq.html#why-do-binaries-have-cargolock-in-version-control-but-not-libraries).
+
+Let’s dig in a little bit more.
+
+`Cargo.toml` is a **manifest** file in which we can specify a bunch of
+different metadata about our project. For example, we can say that we depend
+on another project:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git" }
+```
+
+This project has a single dependency, on the `rand` library. We’ve stated in
+this case that we’re relying on a particular Git repository that lives on
+GitHub. Since we haven’t specified any other information, Cargo assumes that
+we intend to use the latest commit on the `master` branch to build our project.
+
+Sound good? Well, there’s one problem: if you build this project today, and
+then you send a copy to me, and I build this project tomorrow, something bad
+could happen. There could be more commits to `rand` in the meantime, and my
+build would include new commits while yours would not. Therefore, we would
+get different builds. This would be bad, because we want reproducible builds.
+
+We could fix this problem by putting a `rev` line in our `Cargo.toml`:
+
+```toml
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git", rev = "9f35b8e" }
+```
+
+Now our builds will be the same. But there’s a big drawback: now we have to
+manually think about SHA-1s every time we want to update our library. This is
+both tedious and error-prone.
+
+Enter the `Cargo.lock`. Because of its existence, we don’t need to manually
+keep track of the exact revisions: Cargo will do it for us. When we have a
+manifest like this:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git" }
+```
+
+Cargo will take the latest commit and write that information out into our
+`Cargo.lock` when we build for the first time. That file will look like this:
+
+```toml
+[[package]]
+name = "hello_world"
+version = "0.1.0"
+dependencies = [
+ "rand 0.1.0 (git+https://github.com/rust-lang-nursery/rand.git#9f35b8e439eeedd60b9414c58f389bdc6a3284f9)",
+]
+
+[[package]]
+name = "rand"
+version = "0.1.0"
+source = "git+https://github.com/rust-lang-nursery/rand.git#9f35b8e439eeedd60b9414c58f389bdc6a3284f9"
+```
+
+You can see that there’s a lot more information here, including the exact
+revision we used to build. Now when you give your project to someone else,
+they’ll use the exact same SHA, even though we didn’t specify it in our
+`Cargo.toml`.
+
+When we’re ready to opt in to a new version of the library, Cargo can
+recalculate the dependencies and update things for us:
+
+```console
+$ cargo update           # updates all dependencies
+$ cargo update -p rand   # updates just “rand”
+```
+
+This will write out a new `Cargo.lock` with the new version information. Note
+that the argument to `cargo update` is actually a
+[Package ID Specification](reference/pkgid-spec.html) and `rand` is just a short
+specification.
diff --git a/src/doc/src/guide/continuous-integration.md b/src/doc/src/guide/continuous-integration.md
new file mode 100644
index 000000000..6e5efe72c
--- /dev/null
+++ b/src/doc/src/guide/continuous-integration.md
@@ -0,0 +1,21 @@
+## Continuous Integration
+
+### Travis CI
+
+To test your project on Travis CI, here is a sample `.travis.yml` file:
+
+```yaml
+language: rust
+rust:
+  - stable
+  - beta
+  - nightly
+matrix:
+  allow_failures:
+    - rust: nightly
+```
+
+This will test all three release channels, but any breakage in nightly
+will not fail your overall build. Please see the [Travis CI Rust
+documentation](https://docs.travis-ci.com/user/languages/rust/) for more
+information.
diff --git a/src/doc/src/guide/creating-a-new-project.md b/src/doc/src/guide/creating-a-new-project.md
new file mode 100644
index 000000000..98f2a65d7
--- /dev/null
+++ b/src/doc/src/guide/creating-a-new-project.md
@@ -0,0 +1,87 @@
+## Creating a New Project
+
+To start a new project with Cargo, use `cargo new`:
+
+```console
+$ cargo new hello_world --bin
+```
+
+We’re passing `--bin` because we’re making a binary program: if we
+were making a library, we’d pass `--lib`. This also initializes a new `git`
+repository by default. If you don't want it to do that, pass `--vcs none`.
+
+Let’s check out what Cargo has generated for us:
+
+```console
+$ cd hello_world
+$ tree .
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+1 directory, 2 files
+```
+
+Let’s take a closer look at `Cargo.toml`:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+```
+
+This is called a **manifest**, and it contains all of the metadata that Cargo
+needs to compile your project.
+
+Here’s what’s in `src/main.rs`:
+
+```rust
+fn main() {
+    println!("Hello, world!");
+}
+```
+
+Cargo generated a “hello world” for us. Let’s compile it:
+
+```console
+$ cargo build
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+And then run it:
+
+```console
+$ ./target/debug/hello_world
+Hello, world!
+```
+
+We can also use `cargo run` to compile and then run it, all in one step (you
+won’t see the `Compiling` line if you have not made any changes since you
+last compiled):
+
+```console
+$ cargo run
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+     Running `target/debug/hello_world`
+Hello, world!
+```
+
+You’ll now notice a new file, `Cargo.lock`. It contains information about our
+dependencies. Since we don’t have any yet, it’s not very interesting.
+
+Once you’re ready for release, you can use `cargo build --release` to compile
+your files with optimizations turned on:
+
+```console
+$ cargo build --release
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+`cargo build --release` puts the resulting binary in `target/release` instead of
+`target/debug`.
+
+Compiling in debug mode is the default for development: compilation time is
+shorter since the compiler doesn’t do optimizations, but the code will run
+slower. Release mode takes longer to compile, but the code will run faster.
diff --git a/src/doc/src/guide/dependencies.md b/src/doc/src/guide/dependencies.md
new file mode 100644
index 000000000..5b03a133c
--- /dev/null
+++ b/src/doc/src/guide/dependencies.md
@@ -0,0 +1,90 @@
+## Dependencies
+
+[crates.io] is the Rust community's central package registry that serves as a
+location to discover and download packages. `cargo` is configured to use it by
+default to find requested packages.
+
+To depend on a library hosted on [crates.io], add it to your `Cargo.toml`.
+
+[crates.io]: https://crates.io/
+
+### Adding a dependency
+
+If your `Cargo.toml` doesn't already have a `[dependencies]` section, add it,
+then list the crate name and version that you would like to use. This example
+adds a dependency on the `time` crate:
+
+```toml
+[dependencies]
+time = "0.1.12"
+```
+
+The version string is a [semver] version requirement. The [specifying
+dependencies](reference/specifying-dependencies.html) docs have more information about
+the options you have here.
+
+[semver]: https://github.com/steveklabnik/semver#requirements
+
+If we also wanted to add a dependency on the `regex` crate, we would not need
+to add a separate `[dependencies]` section for each crate.
+Here’s what your whole `Cargo.toml` file would look like with dependencies on
+the `time` and `regex` crates:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+time = "0.1.12"
+regex = "0.1.41"
+```
+
+Re-run `cargo build`, and Cargo will fetch the new dependencies and all of
+their dependencies, compile them all, and update the `Cargo.lock`:
+
+```console
+$ cargo build
+      Updating registry `https://github.com/rust-lang/crates.io-index`
+ Downloading memchr v0.1.5
+ Downloading libc v0.1.10
+ Downloading regex-syntax v0.2.1
+ Downloading memchr v0.1.5
+ Downloading aho-corasick v0.3.0
+ Downloading regex v0.1.41
+   Compiling memchr v0.1.5
+   Compiling libc v0.1.10
+   Compiling regex-syntax v0.2.1
+   Compiling memchr v0.1.5
+   Compiling aho-corasick v0.3.0
+   Compiling regex v0.1.41
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+Our `Cargo.lock` contains the exact information about which revision of all of
+these dependencies we used.
+
+Now, if `regex` gets updated, we will still build with the same revision until
+we choose to run `cargo update`.
+
+You can now use the `regex` library in `main.rs`, bringing it in with
+`extern crate`:
+
+```rust
+extern crate regex;
+
+use regex::Regex;
+
+fn main() {
+    let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
+    println!("Did our date match? {}", re.is_match("2014-01-01"));
+}
+```
+
+Running it will show:
+
+```console
+$ cargo run
+     Running `target/debug/hello_world`
+Did our date match? true
+```
diff --git a/src/doc/src/guide/index.md b/src/doc/src/guide/index.md
new file mode 100644
index 000000000..c8a61b28d
--- /dev/null
+++ b/src/doc/src/guide/index.md
@@ -0,0 +1,14 @@
+## Cargo Guide
+
+This guide will give you all that you need to know about how to use Cargo to
+develop Rust projects.
+
+* [Why Cargo Exists](guide/why-cargo-exists.html)
+* [Creating a New Project](guide/creating-a-new-project.html)
+* [Working on an Existing Cargo Project](guide/working-on-an-existing-project.html)
+* [Dependencies](guide/dependencies.html)
+* [Project Layout](guide/project-layout.html)
+* [Cargo.toml vs Cargo.lock](guide/cargo-toml-vs-cargo-lock.html)
+* [Tests](guide/tests.html)
+* [Continuous Integration](guide/continuous-integration.html)
+* [Build Cache](guide/build-cache.html)
diff --git a/src/doc/src/guide/project-layout.md b/src/doc/src/guide/project-layout.md
new file mode 100644
index 000000000..300c6e5c9
--- /dev/null
+++ b/src/doc/src/guide/project-layout.md
@@ -0,0 +1,35 @@
+## Project Layout
+
+Cargo uses conventions for file placement to make it easy to dive into a new
+Cargo project:
+
+```
+.
+├── Cargo.lock
+├── Cargo.toml
+├── benches
+│   └── large-input.rs
+├── examples
+│   └── simple.rs
+├── src
+│   ├── bin
+│   │   └── another_executable.rs
+│   ├── lib.rs
+│   └── main.rs
+└── tests
+    └── some-integration-tests.rs
+```
+
+* `Cargo.toml` and `Cargo.lock` are stored in the root of your project (the
+  *package root*).
+* Source code goes in the `src` directory.
+* The default library file is `src/lib.rs`.
+* The default executable file is `src/main.rs`.
+* Other executables can be placed in `src/bin/*.rs` (see the sketch after
+  this list).
+* Integration tests go in the `tests` directory (unit tests go in each file
+  they're testing).
+* Examples go in the `examples` directory.
+* Benchmarks go in the `benches` directory.
+
+These are explained in more detail in the [manifest
+description](reference/manifest.html#the-project-layout).
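+
+As a quick sketch of the two conventions that involve extra files (the file
+names here are just the ones from the example tree above), neither an extra
+executable nor an integration test needs any configuration; placement alone
+is enough:
+
+```rust
+// src/bin/another_executable.rs: picked up automatically as a second
+// binary target. Run it with `cargo run --bin another_executable`.
+fn main() {
+    println!("Hello from another_executable!");
+}
+```
+
+```rust
+// tests/some-integration-tests.rs: compiled as its own crate, so it can only
+// exercise the package's public API. The `add` function is hypothetical;
+// any public item exported from src/lib.rs would do.
+extern crate hello_world;
+
+#[test]
+fn adds_numbers() {
+    assert_eq!(hello_world::add(2, 2), 4);
+}
+```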
diff --git a/src/doc/src/guide/tests.md b/src/doc/src/guide/tests.md new file mode 100644 index 000000000..95d1c2d3a --- /dev/null +++ b/src/doc/src/guide/tests.md @@ -0,0 +1,39 @@ +## Tests + +Cargo can run your tests with the `cargo test` command. Cargo looks for tests +to run in two places: in each of your `src` files and any tests in `tests/`. +Tests in your `src` files should be unit tests, and tests in `tests/` should be +integration-style tests. As such, you’ll need to import your crates into +the files in `tests`. + +Here's an example of running `cargo test` in our project, which currently has +no tests: + +```console +$ cargo test + Compiling rand v0.1.0 (https://github.com/rust-lang-nursery/rand.git#9f35b8e) + Compiling hello_world v0.1.0 (file:///path/to/project/hello_world) + Running target/test/hello_world-9c2b65bbb79eabce + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out +``` + +If our project had tests, we would see more output with the correct number of +tests. + +You can also run a specific test by passing a filter: + +```console +$ cargo test foo +``` + +This will run any test with `foo` in its name. + +`cargo test` runs additional checks as well. For example, it will compile any +examples you’ve included and will also test the examples in your +documentation. Please see the [testing guide][testing] in the Rust +documentation for more details. + +[testing]: https://doc.rust-lang.org/book/testing.html diff --git a/src/doc/src/guide/why-cargo-exists.md b/src/doc/src/guide/why-cargo-exists.md new file mode 100644 index 000000000..9c5d0d2dd --- /dev/null +++ b/src/doc/src/guide/why-cargo-exists.md @@ -0,0 +1,12 @@ +## Why Cargo Exists + +Cargo is a tool that allows Rust projects to declare their various +dependencies and ensure that you’ll always get a repeatable build. + +To accomplish this goal, Cargo does four things: + +* Introduces two metadata files with various bits of project information. +* Fetches and builds your project’s dependencies. +* Invokes `rustc` or another build tool with the correct parameters to build + your project. +* Introduces conventions to make working with Rust projects easier. diff --git a/src/doc/src/guide/working-on-an-existing-project.md b/src/doc/src/guide/working-on-an-existing-project.md new file mode 100644 index 000000000..34ee6c5a8 --- /dev/null +++ b/src/doc/src/guide/working-on-an-existing-project.md @@ -0,0 +1,22 @@ +## Working on an Existing Cargo Project + +If you download an existing project that uses Cargo, it’s really easy +to get going. + +First, get the project from somewhere. In this example, we’ll use `rand` +cloned from its repository on GitHub: + +```console +$ git clone https://github.com/rust-lang-nursery/rand.git +$ cd rand +``` + +To build, use `cargo build`: + +```console +$ cargo build + Compiling rand v0.1.0 (file:///path/to/project/rand) +``` + +This will fetch all of the dependencies and then build them, along with the +project. 
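+
+From there, a natural next step (assuming the project has a test suite, as
+most Cargo projects do) is to run the tests and make sure everything passes
+before you start making changes:
+
+```console
+$ cargo test
+```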
diff --git a/src/doc/src/images/Cargo-Logo-Small.png b/src/doc/src/images/Cargo-Logo-Small.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3a99208c287bc3fba67de085255df1b4ad22c0a
Binary files /dev/null and b/src/doc/src/images/Cargo-Logo-Small.png differ