From 8d822c24d5baa6d70bd2ac77a5ba36c81ff9e20f Mon Sep 17 00:00:00 2001 From: Ximin Luo Date: Fri, 31 May 2019 08:29:11 +0100 Subject: [PATCH 1/1] Import cargo_0.35.0.orig.tar.gz [dgit import orig cargo_0.35.0.orig.tar.gz] --- .github/ISSUE_TEMPLATE/bug_report.md | 28 + .github/ISSUE_TEMPLATE/feature_request.md | 16 + .gitignore | 14 + .travis.yml | 52 + ARCHITECTURE.md | 133 + CONTRIBUTING.md | 202 + Cargo.toml | 112 + LICENSE-APACHE | 201 + LICENSE-MIT | 23 + LICENSE-THIRD-PARTY | 1272 +++++ README.md | 90 + appveyor.yml | 26 + src/bin/cargo/cli.rs | 242 + src/bin/cargo/commands/bench.rs | 105 + src/bin/cargo/commands/build.rs | 61 + src/bin/cargo/commands/check.rs | 75 + src/bin/cargo/commands/clean.rs | 35 + src/bin/cargo/commands/doc.rs | 65 + src/bin/cargo/commands/fetch.rs | 34 + src/bin/cargo/commands/fix.rs | 151 + src/bin/cargo/commands/generate_lockfile.rs | 15 + src/bin/cargo/commands/git_checkout.rs | 36 + src/bin/cargo/commands/init.rs | 20 + src/bin/cargo/commands/install.rs | 134 + src/bin/cargo/commands/locate_project.rs | 34 + src/bin/cargo/commands/login.rs | 27 + src/bin/cargo/commands/metadata.rs | 53 + src/bin/cargo/commands/mod.rs | 104 + src/bin/cargo/commands/new.rs | 28 + src/bin/cargo/commands/owner.rs | 50 + src/bin/cargo/commands/package.rs | 52 + src/bin/cargo/commands/pkgid.rs | 41 + src/bin/cargo/commands/publish.rs | 50 + src/bin/cargo/commands/read_manifest.rs | 21 + src/bin/cargo/commands/run.rs | 96 + src/bin/cargo/commands/rustc.rs | 74 + src/bin/cargo/commands/rustdoc.rs | 70 + src/bin/cargo/commands/search.rs | 31 + src/bin/cargo/commands/test.rs | 151 + src/bin/cargo/commands/uninstall.rs | 30 + src/bin/cargo/commands/update.rs | 53 + src/bin/cargo/commands/verify_project.rs | 30 + src/bin/cargo/commands/version.rs | 14 + src/bin/cargo/commands/yank.rs | 43 + src/bin/cargo/main.rs | 215 + src/cargo/core/compiler/build_config.rs | 213 + src/cargo/core/compiler/build_context/mod.rs | 406 ++ .../compiler/build_context/target_info.rs | 291 + src/cargo/core/compiler/build_plan.rs | 162 + src/cargo/core/compiler/compilation.rs | 310 ++ .../compiler/context/compilation_files.rs | 523 ++ src/cargo/core/compiler/context/mod.rs | 568 ++ .../compiler/context/unit_dependencies.rs | 543 ++ src/cargo/core/compiler/custom_build.rs | 694 +++ src/cargo/core/compiler/fingerprint.rs | 895 +++ src/cargo/core/compiler/job.rs | 71 + src/cargo/core/compiler/job_queue.rs | 582 ++ src/cargo/core/compiler/layout.rs | 212 + src/cargo/core/compiler/mod.rs | 1043 ++++ src/cargo/core/compiler/output_depinfo.rs | 128 + src/cargo/core/dependency.rs | 486 ++ src/cargo/core/features.rs | 426 ++ src/cargo/core/interning.rs | 107 + src/cargo/core/manifest.rs | 931 ++++ src/cargo/core/mod.rs | 32 + src/cargo/core/package.rs | 937 ++++ src/cargo/core/package_id.rs | 244 + src/cargo/core/package_id_spec.rs | 375 ++ src/cargo/core/profiles.rs | 680 +++ src/cargo/core/registry.rs | 668 +++ src/cargo/core/resolver/conflict_cache.rs | 198 + src/cargo/core/resolver/context.rs | 419 ++ src/cargo/core/resolver/encode.rs | 423 ++ src/cargo/core/resolver/errors.rs | 295 + src/cargo/core/resolver/mod.rs | 908 ++++ src/cargo/core/resolver/resolve.rs | 284 + src/cargo/core/resolver/types.rs | 519 ++ src/cargo/core/shell.rs | 467 ++ src/cargo/core/source/mod.rs | 309 ++ src/cargo/core/source/source_id.rs | 589 ++ src/cargo/core/summary.rs | 411 ++ src/cargo/core/workspace.rs | 910 ++++ src/cargo/lib.rs | 216 + src/cargo/macros.rs | 49 + src/cargo/ops/cargo_clean.rs | 147 + 
src/cargo/ops/cargo_compile.rs | 880 +++ src/cargo/ops/cargo_doc.rs | 110 + src/cargo/ops/cargo_fetch.rs | 65 + src/cargo/ops/cargo_generate_lockfile.rs | 220 + src/cargo/ops/cargo_install.rs | 503 ++ src/cargo/ops/cargo_new.rs | 767 +++ src/cargo/ops/cargo_output_metadata.rs | 138 + src/cargo/ops/cargo_package.rs | 511 ++ src/cargo/ops/cargo_pkgid.rs | 16 + src/cargo/ops/cargo_read_manifest.rs | 201 + src/cargo/ops/cargo_run.rs | 104 + src/cargo/ops/cargo_test.rs | 201 + src/cargo/ops/cargo_uninstall.rs | 157 + .../ops/common_for_install_and_uninstall.rs | 251 + src/cargo/ops/fix.rs | 691 +++ src/cargo/ops/lockfile.rs | 190 + src/cargo/ops/mod.rs | 48 + src/cargo/ops/registry.rs | 773 +++ src/cargo/ops/resolve.rs | 593 ++ src/cargo/sources/config.rs | 256 + src/cargo/sources/directory.rs | 219 + src/cargo/sources/git/mod.rs | 4 + src/cargo/sources/git/source.rs | 299 + src/cargo/sources/git/utils.rs | 923 ++++ src/cargo/sources/mod.rs | 13 + src/cargo/sources/path.rs | 577 ++ src/cargo/sources/registry/index.rs | 316 ++ src/cargo/sources/registry/local.rs | 115 + src/cargo/sources/registry/mod.rs | 637 +++ src/cargo/sources/registry/remote.rs | 288 + src/cargo/sources/replaced.rs | 122 + src/cargo/util/cfg.rs | 278 + src/cargo/util/command_prelude.rs | 503 ++ src/cargo/util/config.rs | 1647 ++++++ src/cargo/util/dependency_queue.rs | 233 + src/cargo/util/diagnostic_server.rs | 293 + src/cargo/util/errors.rs | 389 ++ src/cargo/util/flock.rs | 341 ++ src/cargo/util/graph.rs | 134 + src/cargo/util/hex.rs | 27 + src/cargo/util/important_paths.rs | 32 + src/cargo/util/job.rs | 142 + src/cargo/util/lev_distance.rs | 56 + src/cargo/util/lockserver.rs | 171 + src/cargo/util/machine_message.rs | 75 + src/cargo/util/mod.rs | 82 + src/cargo/util/network.rs | 127 + src/cargo/util/paths.rs | 332 ++ src/cargo/util/process_builder.rs | 387 ++ src/cargo/util/profile.rs | 89 + src/cargo/util/progress.rs | 419 ++ src/cargo/util/read2.rs | 179 + src/cargo/util/rustc.rs | 278 + src/cargo/util/sha256.rs | 29 + src/cargo/util/to_semver.rs | 33 + src/cargo/util/to_url.rs | 24 + src/cargo/util/toml/mod.rs | 1523 ++++++ src/cargo/util/toml/targets.rs | 823 +++ src/cargo/util/vcs.rs | 102 + src/cargo/util/workspace.rs | 75 + src/crates-io/Cargo.toml | 23 + src/crates-io/LICENSE-APACHE | 1 + src/crates-io/LICENSE-MIT | 1 + src/crates-io/lib.rs | 354 ++ src/doc/Makefile | 28 + src/doc/README.md | 47 + src/doc/asciidoc-extension.rb | 109 + src/doc/book.toml | 2 + src/doc/man/cargo-bench.adoc | 142 + src/doc/man/cargo-build.adoc | 94 + src/doc/man/cargo-check.adoc | 87 + src/doc/man/cargo-clean.adoc | 76 + src/doc/man/cargo-doc.adoc | 95 + src/doc/man/cargo-fetch.adoc | 57 + src/doc/man/cargo-fix.adoc | 138 + src/doc/man/cargo-generate-lockfile.adoc | 49 + src/doc/man/cargo-help.adoc | 28 + src/doc/man/cargo-init.adoc | 55 + src/doc/man/cargo-install.adoc | 130 + src/doc/man/cargo-locate-project.adoc | 46 + src/doc/man/cargo-login.adoc | 51 + src/doc/man/cargo-metadata.adoc | 286 + src/doc/man/cargo-new.adoc | 50 + src/doc/man/cargo-owner.adoc | 80 + src/doc/man/cargo-package.adoc | 95 + src/doc/man/cargo-pkgid.adoc | 94 + src/doc/man/cargo-publish.adoc | 90 + src/doc/man/cargo-run.adoc | 89 + src/doc/man/cargo-rustc.adoc | 94 + src/doc/man/cargo-rustdoc.adoc | 96 + src/doc/man/cargo-search.adoc | 49 + src/doc/man/cargo-test.adoc | 152 + src/doc/man/cargo-uninstall.adoc | 57 + src/doc/man/cargo-update.adoc | 81 + src/doc/man/cargo-verify-project.adoc | 57 + src/doc/man/cargo-version.adoc | 39 + 
src/doc/man/cargo-yank.adoc | 64 + src/doc/man/cargo.adoc | 217 + src/doc/man/description-install-root.adoc | 7 + src/doc/man/description-new-authors.adoc | 24 + src/doc/man/description-one-target.adoc | 4 + src/doc/man/generated/cargo-bench.html | 466 ++ src/doc/man/generated/cargo-build.html | 425 ++ src/doc/man/generated/cargo-check.html | 420 ++ src/doc/man/generated/cargo-clean.html | 224 + src/doc/man/generated/cargo-doc.html | 381 ++ src/doc/man/generated/cargo-fetch.html | 188 + src/doc/man/generated/cargo-fix.html | 499 ++ .../generated/cargo-generate-lockfile.html | 166 + src/doc/man/generated/cargo-help.html | 53 + src/doc/man/generated/cargo-init.html | 255 + src/doc/man/generated/cargo-install.html | 322 ++ .../man/generated/cargo-locate-project.html | 152 + src/doc/man/generated/cargo-login.html | 160 + src/doc/man/generated/cargo-metadata.html | 438 ++ src/doc/man/generated/cargo-new.html | 248 + src/doc/man/generated/cargo-owner.html | 212 + src/doc/man/generated/cargo-package.html | 301 ++ src/doc/man/generated/cargo-pkgid.html | 241 + src/doc/man/generated/cargo-publish.html | 300 + src/doc/man/generated/cargo-run.html | 350 ++ src/doc/man/generated/cargo-rustc.html | 413 ++ src/doc/man/generated/cargo-rustdoc.html | 417 ++ src/doc/man/generated/cargo-search.html | 158 + src/doc/man/generated/cargo-test.html | 528 ++ src/doc/man/generated/cargo-uninstall.html | 183 + src/doc/man/generated/cargo-update.html | 216 + .../man/generated/cargo-verify-project.html | 174 + src/doc/man/generated/cargo-version.html | 76 + src/doc/man/generated/cargo-yank.html | 188 + src/doc/man/generated/cargo.html | 433 ++ src/doc/man/options-common.adoc | 7 + src/doc/man/options-display.adoc | 22 + src/doc/man/options-features.adoc | 16 + src/doc/man/options-index.adoc | 2 + src/doc/man/options-jobs.adoc | 5 + src/doc/man/options-locked.adoc | 10 + src/doc/man/options-manifest-path.adoc | 3 + src/doc/man/options-message-format.adoc | 6 + src/doc/man/options-new.adoc | 29 + src/doc/man/options-package.adoc | 7 + src/doc/man/options-packages.adoc | 18 + src/doc/man/options-profile.adoc | 6 + src/doc/man/options-registry.adoc | 4 + src/doc/man/options-release.adoc | 3 + src/doc/man/options-target-dir.adoc | 5 + src/doc/man/options-target-triple.adoc | 8 + src/doc/man/options-targets-lib-bin.adoc | 8 + src/doc/man/options-targets.adoc | 39 + src/doc/man/options-test.adoc | 8 + src/doc/man/options-token.adoc | 10 + src/doc/man/section-environment.adoc | 4 + src/doc/man/section-exit-status.adoc | 7 + src/doc/man/section-profiles.adoc | 26 + src/doc/src/SUMMARY.md | 70 + src/doc/src/appendix/glossary.md | 190 + src/doc/src/commands/build-commands.md | 1 + src/doc/src/commands/cargo-bench.md | 3 + src/doc/src/commands/cargo-build.md | 3 + src/doc/src/commands/cargo-check.md | 3 + src/doc/src/commands/cargo-clean.md | 3 + src/doc/src/commands/cargo-doc.md | 3 + src/doc/src/commands/cargo-fetch.md | 3 + src/doc/src/commands/cargo-fix.md | 3 + .../src/commands/cargo-generate-lockfile.md | 3 + src/doc/src/commands/cargo-help.md | 3 + src/doc/src/commands/cargo-init.md | 3 + src/doc/src/commands/cargo-install.md | 3 + src/doc/src/commands/cargo-locate-project.md | 3 + src/doc/src/commands/cargo-login.md | 3 + src/doc/src/commands/cargo-metadata.md | 3 + src/doc/src/commands/cargo-new.md | 3 + src/doc/src/commands/cargo-owner.md | 3 + src/doc/src/commands/cargo-package.md | 3 + src/doc/src/commands/cargo-pkgid.md | 3 + src/doc/src/commands/cargo-publish.md | 3 + src/doc/src/commands/cargo-run.md | 3 + 
src/doc/src/commands/cargo-rustc.md | 3 + src/doc/src/commands/cargo-rustdoc.md | 3 + src/doc/src/commands/cargo-search.md | 3 + src/doc/src/commands/cargo-test.md | 3 + src/doc/src/commands/cargo-uninstall.md | 3 + src/doc/src/commands/cargo-update.md | 3 + src/doc/src/commands/cargo-verify-project.md | 3 + src/doc/src/commands/cargo-version.md | 3 + src/doc/src/commands/cargo-yank.md | 3 + src/doc/src/commands/command-common.html | 18 + src/doc/src/commands/general-commands.md | 1 + src/doc/src/commands/index.md | 3 + src/doc/src/commands/manifest-commands.md | 1 + src/doc/src/commands/package-commands.md | 1 + src/doc/src/commands/publishing-commands.md | 1 + src/doc/src/faq.md | 193 + src/doc/src/getting-started/first-steps.md | 73 + src/doc/src/getting-started/index.md | 6 + src/doc/src/getting-started/installation.md | 37 + src/doc/src/guide/build-cache.md | 14 + src/doc/src/guide/cargo-toml-vs-cargo-lock.md | 103 + src/doc/src/guide/continuous-integration.md | 88 + src/doc/src/guide/creating-a-new-project.md | 91 + src/doc/src/guide/dependencies.md | 90 + src/doc/src/guide/index.md | 14 + src/doc/src/guide/project-layout.md | 35 + src/doc/src/guide/tests.md | 39 + src/doc/src/guide/why-cargo-exists.md | 12 + .../guide/working-on-an-existing-project.md | 22 + src/doc/src/images/Cargo-Logo-Small.png | Bin 0 -> 58168 bytes src/doc/src/images/auth-level-acl.png | Bin 0 -> 90300 bytes src/doc/src/images/org-level-acl.png | Bin 0 -> 76572 bytes src/doc/src/index.md | 30 + src/doc/src/reference/build-scripts.md | 570 ++ src/doc/src/reference/config.md | 201 + .../src/reference/environment-variables.md | 144 + src/doc/src/reference/external-tools.md | 114 + src/doc/src/reference/index.md | 14 + src/doc/src/reference/manifest.md | 901 +++ src/doc/src/reference/pkgid-spec.md | 44 + src/doc/src/reference/publishing.md | 222 + src/doc/src/reference/registries.md | 590 ++ src/doc/src/reference/source-replacement.md | 139 + .../src/reference/specifying-dependencies.md | 611 +++ src/doc/src/reference/unstable.md | 232 + src/doc/theme/favicon.png | Bin 0 -> 5430 bytes src/etc/_cargo | 544 ++ src/etc/cargo.bashcomp.sh | 262 + src/etc/man/cargo-bench.1 | 501 ++ src/etc/man/cargo-build.1 | 443 ++ src/etc/man/cargo-check.1 | 439 ++ src/etc/man/cargo-clean.1 | 227 + src/etc/man/cargo-doc.1 | 380 ++ src/etc/man/cargo-fetch.1 | 183 + src/etc/man/cargo-fix.1 | 526 ++ src/etc/man/cargo-generate-lockfile.1 | 168 + src/etc/man/cargo-help.1 | 75 + src/etc/man/cargo-init.1 | 364 ++ src/etc/man/cargo-install.1 | 370 ++ src/etc/man/cargo-locate-project.1 | 155 + src/etc/man/cargo-login.1 | 163 + src/etc/man/cargo-metadata.1 | 428 ++ src/etc/man/cargo-new.1 | 359 ++ src/etc/man/cargo-owner.1 | 238 + src/etc/man/cargo-package.1 | 342 ++ src/etc/man/cargo-pkgid.1 | 279 + src/etc/man/cargo-publish.1 | 319 ++ src/etc/man/cargo-run.1 | 358 ++ src/etc/man/cargo-rustc.1 | 429 ++ src/etc/man/cargo-rustdoc.1 | 419 ++ src/etc/man/cargo-search.1 | 167 + src/etc/man/cargo-test.1 | 590 ++ src/etc/man/cargo-uninstall.1 | 223 + src/etc/man/cargo-update.1 | 232 + src/etc/man/cargo-verify-project.1 | 178 + src/etc/man/cargo-version.1 | 99 + src/etc/man/cargo-yank.1 | 194 + src/etc/man/cargo.1 | 473 ++ tests/testsuite/alt_registry.rs | 1134 ++++ tests/testsuite/bad_config.rs | 1282 +++++ tests/testsuite/bad_manifest_path.rs | 382 ++ tests/testsuite/bench.rs | 1631 ++++++ tests/testsuite/build.rs | 4809 +++++++++++++++++ tests/testsuite/build_auth.rs | 258 + tests/testsuite/build_lib.rs | 63 + tests/testsuite/build_plan.rs | 220 + 
tests/testsuite/build_script.rs | 3698 +++++++++++++ tests/testsuite/build_script_env.rs | 108 + tests/testsuite/cargo_alias_config.rs | 176 + tests/testsuite/cargo_command.rs | 338 ++ tests/testsuite/cargo_features.rs | 318 ++ tests/testsuite/cfg.rs | 448 ++ tests/testsuite/check-style.sh | 3 + tests/testsuite/check.rs | 771 +++ tests/testsuite/clean.rs | 299 + tests/testsuite/collisions.rs | 105 + tests/testsuite/concurrent.rs | 531 ++ tests/testsuite/config.rs | 694 +++ tests/testsuite/corrupt_git.rs | 160 + tests/testsuite/cross_compile.rs | 1231 +++++ tests/testsuite/cross_publish.rs | 113 + tests/testsuite/custom_target.rs | 130 + tests/testsuite/death.rs | 143 + tests/testsuite/dep_info.rs | 105 + tests/testsuite/directory.rs | 765 +++ tests/testsuite/doc.rs | 1408 +++++ tests/testsuite/edition.rs | 36 + tests/testsuite/features.rs | 1869 +++++++ tests/testsuite/fetch.rs | 114 + tests/testsuite/fix.rs | 1291 +++++ tests/testsuite/freshness.rs | 1619 ++++++ tests/testsuite/generate_lockfile.rs | 237 + tests/testsuite/git.rs | 2882 ++++++++++ tests/testsuite/init.rs | 561 ++ tests/testsuite/install.rs | 1365 +++++ tests/testsuite/jobserver.rs | 201 + tests/testsuite/list_targets.rs | 189 + tests/testsuite/local_registry.rs | 485 ++ tests/testsuite/lockfile_compat.rs | 508 ++ tests/testsuite/login.rs | 140 + tests/testsuite/main.rs | 93 + tests/testsuite/member_errors.rs | 154 + tests/testsuite/metabuild.rs | 776 +++ tests/testsuite/metadata.rs | 1671 ++++++ tests/testsuite/net_config.rs | 72 + tests/testsuite/new.rs | 513 ++ tests/testsuite/out_dir.rs | 275 + tests/testsuite/overrides.rs | 1412 +++++ tests/testsuite/package.rs | 1331 +++++ tests/testsuite/patch.rs | 1022 ++++ tests/testsuite/path.rs | 1047 ++++ tests/testsuite/plugins.rs | 434 ++ tests/testsuite/proc_macro.rs | 439 ++ tests/testsuite/profile_config.rs | 391 ++ tests/testsuite/profile_overrides.rs | 441 ++ tests/testsuite/profile_targets.rs | 648 +++ tests/testsuite/profiles.rs | 378 ++ tests/testsuite/publish.rs | 945 ++++ tests/testsuite/read_manifest.rs | 104 + tests/testsuite/registry.rs | 1979 +++++++ tests/testsuite/rename_deps.rs | 389 ++ tests/testsuite/required_features.rs | 1161 ++++ tests/testsuite/resolve.rs | 1279 +++++ tests/testsuite/run.rs | 1216 +++++ tests/testsuite/rustc.rs | 441 ++ tests/testsuite/rustc_info_cache.rs | 102 + tests/testsuite/rustdoc.rs | 176 + tests/testsuite/rustdocflags.rs | 87 + tests/testsuite/rustflags.rs | 1327 +++++ tests/testsuite/search.rs | 216 + tests/testsuite/shell_quoting.rs | 37 + tests/testsuite/small_fd_limits.rs | 117 + tests/testsuite/support/cross_compile.rs | 138 + tests/testsuite/support/git.rs | 224 + tests/testsuite/support/install.rs | 31 + tests/testsuite/support/mod.rs | 1649 ++++++ tests/testsuite/support/paths.rs | 165 + tests/testsuite/support/publish.rs | 124 + tests/testsuite/support/registry.rs | 593 ++ tests/testsuite/support/resolver.rs | 580 ++ tests/testsuite/test.rs | 3525 ++++++++++++ tests/testsuite/tool_paths.rs | 259 + tests/testsuite/update.rs | 476 ++ tests/testsuite/verify_project.rs | 71 + tests/testsuite/version.rs | 42 + tests/testsuite/warn_on_failure.rs | 109 + tests/testsuite/workspaces.rs | 2116 ++++++++ 429 files changed, 131993 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 ARCHITECTURE.md create mode 100644 CONTRIBUTING.md create mode 100644 Cargo.toml create mode 
100644 LICENSE-APACHE create mode 100644 LICENSE-MIT create mode 100644 LICENSE-THIRD-PARTY create mode 100644 README.md create mode 100644 appveyor.yml create mode 100644 src/bin/cargo/cli.rs create mode 100644 src/bin/cargo/commands/bench.rs create mode 100644 src/bin/cargo/commands/build.rs create mode 100644 src/bin/cargo/commands/check.rs create mode 100644 src/bin/cargo/commands/clean.rs create mode 100644 src/bin/cargo/commands/doc.rs create mode 100644 src/bin/cargo/commands/fetch.rs create mode 100644 src/bin/cargo/commands/fix.rs create mode 100644 src/bin/cargo/commands/generate_lockfile.rs create mode 100644 src/bin/cargo/commands/git_checkout.rs create mode 100644 src/bin/cargo/commands/init.rs create mode 100644 src/bin/cargo/commands/install.rs create mode 100644 src/bin/cargo/commands/locate_project.rs create mode 100644 src/bin/cargo/commands/login.rs create mode 100644 src/bin/cargo/commands/metadata.rs create mode 100644 src/bin/cargo/commands/mod.rs create mode 100644 src/bin/cargo/commands/new.rs create mode 100644 src/bin/cargo/commands/owner.rs create mode 100644 src/bin/cargo/commands/package.rs create mode 100644 src/bin/cargo/commands/pkgid.rs create mode 100644 src/bin/cargo/commands/publish.rs create mode 100644 src/bin/cargo/commands/read_manifest.rs create mode 100644 src/bin/cargo/commands/run.rs create mode 100644 src/bin/cargo/commands/rustc.rs create mode 100644 src/bin/cargo/commands/rustdoc.rs create mode 100644 src/bin/cargo/commands/search.rs create mode 100644 src/bin/cargo/commands/test.rs create mode 100644 src/bin/cargo/commands/uninstall.rs create mode 100644 src/bin/cargo/commands/update.rs create mode 100644 src/bin/cargo/commands/verify_project.rs create mode 100644 src/bin/cargo/commands/version.rs create mode 100644 src/bin/cargo/commands/yank.rs create mode 100644 src/bin/cargo/main.rs create mode 100644 src/cargo/core/compiler/build_config.rs create mode 100644 src/cargo/core/compiler/build_context/mod.rs create mode 100644 src/cargo/core/compiler/build_context/target_info.rs create mode 100644 src/cargo/core/compiler/build_plan.rs create mode 100644 src/cargo/core/compiler/compilation.rs create mode 100644 src/cargo/core/compiler/context/compilation_files.rs create mode 100644 src/cargo/core/compiler/context/mod.rs create mode 100644 src/cargo/core/compiler/context/unit_dependencies.rs create mode 100644 src/cargo/core/compiler/custom_build.rs create mode 100644 src/cargo/core/compiler/fingerprint.rs create mode 100644 src/cargo/core/compiler/job.rs create mode 100644 src/cargo/core/compiler/job_queue.rs create mode 100644 src/cargo/core/compiler/layout.rs create mode 100644 src/cargo/core/compiler/mod.rs create mode 100644 src/cargo/core/compiler/output_depinfo.rs create mode 100644 src/cargo/core/dependency.rs create mode 100644 src/cargo/core/features.rs create mode 100644 src/cargo/core/interning.rs create mode 100644 src/cargo/core/manifest.rs create mode 100644 src/cargo/core/mod.rs create mode 100644 src/cargo/core/package.rs create mode 100644 src/cargo/core/package_id.rs create mode 100644 src/cargo/core/package_id_spec.rs create mode 100644 src/cargo/core/profiles.rs create mode 100644 src/cargo/core/registry.rs create mode 100644 src/cargo/core/resolver/conflict_cache.rs create mode 100644 src/cargo/core/resolver/context.rs create mode 100644 src/cargo/core/resolver/encode.rs create mode 100644 src/cargo/core/resolver/errors.rs create mode 100644 src/cargo/core/resolver/mod.rs create mode 100644 
src/cargo/core/resolver/resolve.rs create mode 100644 src/cargo/core/resolver/types.rs create mode 100644 src/cargo/core/shell.rs create mode 100644 src/cargo/core/source/mod.rs create mode 100644 src/cargo/core/source/source_id.rs create mode 100644 src/cargo/core/summary.rs create mode 100644 src/cargo/core/workspace.rs create mode 100644 src/cargo/lib.rs create mode 100644 src/cargo/macros.rs create mode 100644 src/cargo/ops/cargo_clean.rs create mode 100644 src/cargo/ops/cargo_compile.rs create mode 100644 src/cargo/ops/cargo_doc.rs create mode 100644 src/cargo/ops/cargo_fetch.rs create mode 100644 src/cargo/ops/cargo_generate_lockfile.rs create mode 100644 src/cargo/ops/cargo_install.rs create mode 100644 src/cargo/ops/cargo_new.rs create mode 100644 src/cargo/ops/cargo_output_metadata.rs create mode 100644 src/cargo/ops/cargo_package.rs create mode 100644 src/cargo/ops/cargo_pkgid.rs create mode 100644 src/cargo/ops/cargo_read_manifest.rs create mode 100644 src/cargo/ops/cargo_run.rs create mode 100644 src/cargo/ops/cargo_test.rs create mode 100644 src/cargo/ops/cargo_uninstall.rs create mode 100644 src/cargo/ops/common_for_install_and_uninstall.rs create mode 100644 src/cargo/ops/fix.rs create mode 100644 src/cargo/ops/lockfile.rs create mode 100644 src/cargo/ops/mod.rs create mode 100644 src/cargo/ops/registry.rs create mode 100644 src/cargo/ops/resolve.rs create mode 100644 src/cargo/sources/config.rs create mode 100644 src/cargo/sources/directory.rs create mode 100644 src/cargo/sources/git/mod.rs create mode 100644 src/cargo/sources/git/source.rs create mode 100644 src/cargo/sources/git/utils.rs create mode 100644 src/cargo/sources/mod.rs create mode 100644 src/cargo/sources/path.rs create mode 100644 src/cargo/sources/registry/index.rs create mode 100644 src/cargo/sources/registry/local.rs create mode 100644 src/cargo/sources/registry/mod.rs create mode 100644 src/cargo/sources/registry/remote.rs create mode 100644 src/cargo/sources/replaced.rs create mode 100644 src/cargo/util/cfg.rs create mode 100644 src/cargo/util/command_prelude.rs create mode 100644 src/cargo/util/config.rs create mode 100644 src/cargo/util/dependency_queue.rs create mode 100644 src/cargo/util/diagnostic_server.rs create mode 100644 src/cargo/util/errors.rs create mode 100644 src/cargo/util/flock.rs create mode 100644 src/cargo/util/graph.rs create mode 100644 src/cargo/util/hex.rs create mode 100644 src/cargo/util/important_paths.rs create mode 100644 src/cargo/util/job.rs create mode 100644 src/cargo/util/lev_distance.rs create mode 100644 src/cargo/util/lockserver.rs create mode 100644 src/cargo/util/machine_message.rs create mode 100644 src/cargo/util/mod.rs create mode 100644 src/cargo/util/network.rs create mode 100644 src/cargo/util/paths.rs create mode 100644 src/cargo/util/process_builder.rs create mode 100644 src/cargo/util/profile.rs create mode 100644 src/cargo/util/progress.rs create mode 100644 src/cargo/util/read2.rs create mode 100644 src/cargo/util/rustc.rs create mode 100644 src/cargo/util/sha256.rs create mode 100644 src/cargo/util/to_semver.rs create mode 100644 src/cargo/util/to_url.rs create mode 100644 src/cargo/util/toml/mod.rs create mode 100644 src/cargo/util/toml/targets.rs create mode 100644 src/cargo/util/vcs.rs create mode 100644 src/cargo/util/workspace.rs create mode 100644 src/crates-io/Cargo.toml create mode 120000 src/crates-io/LICENSE-APACHE create mode 120000 src/crates-io/LICENSE-MIT create mode 100644 src/crates-io/lib.rs create mode 100644 src/doc/Makefile create 
mode 100644 src/doc/README.md create mode 100644 src/doc/asciidoc-extension.rb create mode 100644 src/doc/book.toml create mode 100644 src/doc/man/cargo-bench.adoc create mode 100644 src/doc/man/cargo-build.adoc create mode 100644 src/doc/man/cargo-check.adoc create mode 100644 src/doc/man/cargo-clean.adoc create mode 100644 src/doc/man/cargo-doc.adoc create mode 100644 src/doc/man/cargo-fetch.adoc create mode 100644 src/doc/man/cargo-fix.adoc create mode 100644 src/doc/man/cargo-generate-lockfile.adoc create mode 100644 src/doc/man/cargo-help.adoc create mode 100644 src/doc/man/cargo-init.adoc create mode 100644 src/doc/man/cargo-install.adoc create mode 100644 src/doc/man/cargo-locate-project.adoc create mode 100644 src/doc/man/cargo-login.adoc create mode 100644 src/doc/man/cargo-metadata.adoc create mode 100644 src/doc/man/cargo-new.adoc create mode 100644 src/doc/man/cargo-owner.adoc create mode 100644 src/doc/man/cargo-package.adoc create mode 100644 src/doc/man/cargo-pkgid.adoc create mode 100644 src/doc/man/cargo-publish.adoc create mode 100644 src/doc/man/cargo-run.adoc create mode 100644 src/doc/man/cargo-rustc.adoc create mode 100644 src/doc/man/cargo-rustdoc.adoc create mode 100644 src/doc/man/cargo-search.adoc create mode 100644 src/doc/man/cargo-test.adoc create mode 100644 src/doc/man/cargo-uninstall.adoc create mode 100644 src/doc/man/cargo-update.adoc create mode 100644 src/doc/man/cargo-verify-project.adoc create mode 100644 src/doc/man/cargo-version.adoc create mode 100644 src/doc/man/cargo-yank.adoc create mode 100644 src/doc/man/cargo.adoc create mode 100644 src/doc/man/description-install-root.adoc create mode 100644 src/doc/man/description-new-authors.adoc create mode 100644 src/doc/man/description-one-target.adoc create mode 100644 src/doc/man/generated/cargo-bench.html create mode 100644 src/doc/man/generated/cargo-build.html create mode 100644 src/doc/man/generated/cargo-check.html create mode 100644 src/doc/man/generated/cargo-clean.html create mode 100644 src/doc/man/generated/cargo-doc.html create mode 100644 src/doc/man/generated/cargo-fetch.html create mode 100644 src/doc/man/generated/cargo-fix.html create mode 100644 src/doc/man/generated/cargo-generate-lockfile.html create mode 100644 src/doc/man/generated/cargo-help.html create mode 100644 src/doc/man/generated/cargo-init.html create mode 100644 src/doc/man/generated/cargo-install.html create mode 100644 src/doc/man/generated/cargo-locate-project.html create mode 100644 src/doc/man/generated/cargo-login.html create mode 100644 src/doc/man/generated/cargo-metadata.html create mode 100644 src/doc/man/generated/cargo-new.html create mode 100644 src/doc/man/generated/cargo-owner.html create mode 100644 src/doc/man/generated/cargo-package.html create mode 100644 src/doc/man/generated/cargo-pkgid.html create mode 100644 src/doc/man/generated/cargo-publish.html create mode 100644 src/doc/man/generated/cargo-run.html create mode 100644 src/doc/man/generated/cargo-rustc.html create mode 100644 src/doc/man/generated/cargo-rustdoc.html create mode 100644 src/doc/man/generated/cargo-search.html create mode 100644 src/doc/man/generated/cargo-test.html create mode 100644 src/doc/man/generated/cargo-uninstall.html create mode 100644 src/doc/man/generated/cargo-update.html create mode 100644 src/doc/man/generated/cargo-verify-project.html create mode 100644 src/doc/man/generated/cargo-version.html create mode 100644 src/doc/man/generated/cargo-yank.html create mode 100644 src/doc/man/generated/cargo.html create mode 
100644 src/doc/man/options-common.adoc create mode 100644 src/doc/man/options-display.adoc create mode 100644 src/doc/man/options-features.adoc create mode 100644 src/doc/man/options-index.adoc create mode 100644 src/doc/man/options-jobs.adoc create mode 100644 src/doc/man/options-locked.adoc create mode 100644 src/doc/man/options-manifest-path.adoc create mode 100644 src/doc/man/options-message-format.adoc create mode 100644 src/doc/man/options-new.adoc create mode 100644 src/doc/man/options-package.adoc create mode 100644 src/doc/man/options-packages.adoc create mode 100644 src/doc/man/options-profile.adoc create mode 100644 src/doc/man/options-registry.adoc create mode 100644 src/doc/man/options-release.adoc create mode 100644 src/doc/man/options-target-dir.adoc create mode 100644 src/doc/man/options-target-triple.adoc create mode 100644 src/doc/man/options-targets-lib-bin.adoc create mode 100644 src/doc/man/options-targets.adoc create mode 100644 src/doc/man/options-test.adoc create mode 100644 src/doc/man/options-token.adoc create mode 100644 src/doc/man/section-environment.adoc create mode 100644 src/doc/man/section-exit-status.adoc create mode 100644 src/doc/man/section-profiles.adoc create mode 100644 src/doc/src/SUMMARY.md create mode 100644 src/doc/src/appendix/glossary.md create mode 100644 src/doc/src/commands/build-commands.md create mode 100644 src/doc/src/commands/cargo-bench.md create mode 100644 src/doc/src/commands/cargo-build.md create mode 100644 src/doc/src/commands/cargo-check.md create mode 100644 src/doc/src/commands/cargo-clean.md create mode 100644 src/doc/src/commands/cargo-doc.md create mode 100644 src/doc/src/commands/cargo-fetch.md create mode 100644 src/doc/src/commands/cargo-fix.md create mode 100644 src/doc/src/commands/cargo-generate-lockfile.md create mode 100644 src/doc/src/commands/cargo-help.md create mode 100644 src/doc/src/commands/cargo-init.md create mode 100644 src/doc/src/commands/cargo-install.md create mode 100644 src/doc/src/commands/cargo-locate-project.md create mode 100644 src/doc/src/commands/cargo-login.md create mode 100644 src/doc/src/commands/cargo-metadata.md create mode 100644 src/doc/src/commands/cargo-new.md create mode 100644 src/doc/src/commands/cargo-owner.md create mode 100644 src/doc/src/commands/cargo-package.md create mode 100644 src/doc/src/commands/cargo-pkgid.md create mode 100644 src/doc/src/commands/cargo-publish.md create mode 100644 src/doc/src/commands/cargo-run.md create mode 100644 src/doc/src/commands/cargo-rustc.md create mode 100644 src/doc/src/commands/cargo-rustdoc.md create mode 100644 src/doc/src/commands/cargo-search.md create mode 100644 src/doc/src/commands/cargo-test.md create mode 100644 src/doc/src/commands/cargo-uninstall.md create mode 100644 src/doc/src/commands/cargo-update.md create mode 100644 src/doc/src/commands/cargo-verify-project.md create mode 100644 src/doc/src/commands/cargo-version.md create mode 100644 src/doc/src/commands/cargo-yank.md create mode 100644 src/doc/src/commands/command-common.html create mode 100644 src/doc/src/commands/general-commands.md create mode 100644 src/doc/src/commands/index.md create mode 100644 src/doc/src/commands/manifest-commands.md create mode 100644 src/doc/src/commands/package-commands.md create mode 100644 src/doc/src/commands/publishing-commands.md create mode 100644 src/doc/src/faq.md create mode 100644 src/doc/src/getting-started/first-steps.md create mode 100644 src/doc/src/getting-started/index.md create mode 100644 
src/doc/src/getting-started/installation.md create mode 100644 src/doc/src/guide/build-cache.md create mode 100644 src/doc/src/guide/cargo-toml-vs-cargo-lock.md create mode 100644 src/doc/src/guide/continuous-integration.md create mode 100644 src/doc/src/guide/creating-a-new-project.md create mode 100644 src/doc/src/guide/dependencies.md create mode 100644 src/doc/src/guide/index.md create mode 100644 src/doc/src/guide/project-layout.md create mode 100644 src/doc/src/guide/tests.md create mode 100644 src/doc/src/guide/why-cargo-exists.md create mode 100644 src/doc/src/guide/working-on-an-existing-project.md create mode 100644 src/doc/src/images/Cargo-Logo-Small.png create mode 100644 src/doc/src/images/auth-level-acl.png create mode 100644 src/doc/src/images/org-level-acl.png create mode 100644 src/doc/src/index.md create mode 100644 src/doc/src/reference/build-scripts.md create mode 100644 src/doc/src/reference/config.md create mode 100644 src/doc/src/reference/environment-variables.md create mode 100644 src/doc/src/reference/external-tools.md create mode 100644 src/doc/src/reference/index.md create mode 100644 src/doc/src/reference/manifest.md create mode 100644 src/doc/src/reference/pkgid-spec.md create mode 100644 src/doc/src/reference/publishing.md create mode 100644 src/doc/src/reference/registries.md create mode 100644 src/doc/src/reference/source-replacement.md create mode 100644 src/doc/src/reference/specifying-dependencies.md create mode 100644 src/doc/src/reference/unstable.md create mode 100644 src/doc/theme/favicon.png create mode 100644 src/etc/_cargo create mode 100644 src/etc/cargo.bashcomp.sh create mode 100644 src/etc/man/cargo-bench.1 create mode 100644 src/etc/man/cargo-build.1 create mode 100644 src/etc/man/cargo-check.1 create mode 100644 src/etc/man/cargo-clean.1 create mode 100644 src/etc/man/cargo-doc.1 create mode 100644 src/etc/man/cargo-fetch.1 create mode 100644 src/etc/man/cargo-fix.1 create mode 100644 src/etc/man/cargo-generate-lockfile.1 create mode 100644 src/etc/man/cargo-help.1 create mode 100644 src/etc/man/cargo-init.1 create mode 100644 src/etc/man/cargo-install.1 create mode 100644 src/etc/man/cargo-locate-project.1 create mode 100644 src/etc/man/cargo-login.1 create mode 100644 src/etc/man/cargo-metadata.1 create mode 100644 src/etc/man/cargo-new.1 create mode 100644 src/etc/man/cargo-owner.1 create mode 100644 src/etc/man/cargo-package.1 create mode 100644 src/etc/man/cargo-pkgid.1 create mode 100644 src/etc/man/cargo-publish.1 create mode 100644 src/etc/man/cargo-run.1 create mode 100644 src/etc/man/cargo-rustc.1 create mode 100644 src/etc/man/cargo-rustdoc.1 create mode 100644 src/etc/man/cargo-search.1 create mode 100644 src/etc/man/cargo-test.1 create mode 100644 src/etc/man/cargo-uninstall.1 create mode 100644 src/etc/man/cargo-update.1 create mode 100644 src/etc/man/cargo-verify-project.1 create mode 100644 src/etc/man/cargo-version.1 create mode 100644 src/etc/man/cargo-yank.1 create mode 100644 src/etc/man/cargo.1 create mode 100644 tests/testsuite/alt_registry.rs create mode 100644 tests/testsuite/bad_config.rs create mode 100644 tests/testsuite/bad_manifest_path.rs create mode 100644 tests/testsuite/bench.rs create mode 100644 tests/testsuite/build.rs create mode 100644 tests/testsuite/build_auth.rs create mode 100644 tests/testsuite/build_lib.rs create mode 100644 tests/testsuite/build_plan.rs create mode 100644 tests/testsuite/build_script.rs create mode 100644 tests/testsuite/build_script_env.rs create mode 100644 
tests/testsuite/cargo_alias_config.rs create mode 100644 tests/testsuite/cargo_command.rs create mode 100644 tests/testsuite/cargo_features.rs create mode 100644 tests/testsuite/cfg.rs create mode 100755 tests/testsuite/check-style.sh create mode 100644 tests/testsuite/check.rs create mode 100644 tests/testsuite/clean.rs create mode 100644 tests/testsuite/collisions.rs create mode 100644 tests/testsuite/concurrent.rs create mode 100644 tests/testsuite/config.rs create mode 100644 tests/testsuite/corrupt_git.rs create mode 100644 tests/testsuite/cross_compile.rs create mode 100644 tests/testsuite/cross_publish.rs create mode 100644 tests/testsuite/custom_target.rs create mode 100644 tests/testsuite/death.rs create mode 100644 tests/testsuite/dep_info.rs create mode 100644 tests/testsuite/directory.rs create mode 100644 tests/testsuite/doc.rs create mode 100644 tests/testsuite/edition.rs create mode 100644 tests/testsuite/features.rs create mode 100644 tests/testsuite/fetch.rs create mode 100644 tests/testsuite/fix.rs create mode 100644 tests/testsuite/freshness.rs create mode 100644 tests/testsuite/generate_lockfile.rs create mode 100644 tests/testsuite/git.rs create mode 100644 tests/testsuite/init.rs create mode 100644 tests/testsuite/install.rs create mode 100644 tests/testsuite/jobserver.rs create mode 100644 tests/testsuite/list_targets.rs create mode 100644 tests/testsuite/local_registry.rs create mode 100644 tests/testsuite/lockfile_compat.rs create mode 100644 tests/testsuite/login.rs create mode 100644 tests/testsuite/main.rs create mode 100644 tests/testsuite/member_errors.rs create mode 100644 tests/testsuite/metabuild.rs create mode 100644 tests/testsuite/metadata.rs create mode 100644 tests/testsuite/net_config.rs create mode 100644 tests/testsuite/new.rs create mode 100644 tests/testsuite/out_dir.rs create mode 100644 tests/testsuite/overrides.rs create mode 100644 tests/testsuite/package.rs create mode 100644 tests/testsuite/patch.rs create mode 100644 tests/testsuite/path.rs create mode 100644 tests/testsuite/plugins.rs create mode 100644 tests/testsuite/proc_macro.rs create mode 100644 tests/testsuite/profile_config.rs create mode 100644 tests/testsuite/profile_overrides.rs create mode 100644 tests/testsuite/profile_targets.rs create mode 100644 tests/testsuite/profiles.rs create mode 100644 tests/testsuite/publish.rs create mode 100644 tests/testsuite/read_manifest.rs create mode 100644 tests/testsuite/registry.rs create mode 100644 tests/testsuite/rename_deps.rs create mode 100644 tests/testsuite/required_features.rs create mode 100644 tests/testsuite/resolve.rs create mode 100644 tests/testsuite/run.rs create mode 100644 tests/testsuite/rustc.rs create mode 100644 tests/testsuite/rustc_info_cache.rs create mode 100644 tests/testsuite/rustdoc.rs create mode 100644 tests/testsuite/rustdocflags.rs create mode 100644 tests/testsuite/rustflags.rs create mode 100644 tests/testsuite/search.rs create mode 100644 tests/testsuite/shell_quoting.rs create mode 100644 tests/testsuite/small_fd_limits.rs create mode 100644 tests/testsuite/support/cross_compile.rs create mode 100644 tests/testsuite/support/git.rs create mode 100644 tests/testsuite/support/install.rs create mode 100644 tests/testsuite/support/mod.rs create mode 100644 tests/testsuite/support/paths.rs create mode 100644 tests/testsuite/support/publish.rs create mode 100644 tests/testsuite/support/registry.rs create mode 100644 tests/testsuite/support/resolver.rs create mode 100644 tests/testsuite/test.rs create mode 
100644 tests/testsuite/tool_paths.rs create mode 100644 tests/testsuite/update.rs create mode 100644 tests/testsuite/verify_project.rs create mode 100644 tests/testsuite/version.rs create mode 100644 tests/testsuite/warn_on_failure.rs create mode 100644 tests/testsuite/workspaces.rs diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..68aeed3f7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,28 @@ +--- +name: Bug report +about: Create a report to help us improve +labels: C-bug +--- + + + +**Problem** + + + +**Steps** + +1. +2. +3. + +**Possible Solution(s)** + + + +**Notes** + +Output of `cargo version`: + + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..68ca4a1b8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,16 @@ +--- +name: Feature request +about: Suggest an idea for this project +labels: C-feature-request +--- + + + +**Describe the problem you are trying to solve** + + +**Describe the solution you'd like** + + +**Notes** + diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..85e363a37 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +/target +Cargo.lock +.cargo +/config.stamp +/Makefile +/config.mk +src/doc/build +src/etc/*.pyc +src/registry/target +rustc +__pycache__ +.idea/ +*.iml +*.swp diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..ba205f0d9 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,52 @@ +language: rust +rust: stable +sudo: required +dist: trusty + +git: + depth: 1 + +matrix: + include: + - env: TARGET=x86_64-unknown-linux-gnu + ALT=i686-unknown-linux-gnu + if: branch != master OR type = pull_request + + - env: TARGET=x86_64-apple-darwin + ALT=i686-apple-darwin + os: osx + osx_image: xcode9.2 + if: branch != master OR type = pull_request + + - env: TARGET=x86_64-unknown-linux-gnu + ALT=i686-unknown-linux-gnu + rust: beta + if: branch != master OR type = pull_request + + - env: TARGET=x86_64-unknown-linux-gnu + ALT=i686-unknown-linux-gnu + rust: nightly + install: + - travis_retry curl -Lf https://github.com/rust-lang-nursery/mdBook/releases/download/v0.1.7/mdbook-v0.1.7-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=$HOME/.cargo/bin + script: + - cargo test --features=deny-warnings || travis_terminate 1 + - cargo doc --no-deps || travis_terminate 1 + - (cd src/doc && mdbook build --dest-dir ../../target/doc) || travis_terminate 1 + if: branch != master OR type = pull_request + + exclude: + - rust: stable + +before_script: + - rustup target add $ALT +script: + - cargo test --features=deny-warnings + +notifications: + email: + on_success: never + +addons: + apt: + packages: + - gcc-multilib diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md new file mode 100644 index 000000000..dcfccfe7f --- /dev/null +++ b/ARCHITECTURE.md @@ -0,0 +1,133 @@ +# Cargo Architecture + +This document gives a high level overview of Cargo internals. You may +find it useful if you want to contribute to Cargo or if you are +interested in the inner workings of Cargo. + +The purpose of Cargo is to formalize a canonical Rust workflow, by automating +the standard tasks associated with distributing software. Cargo simplifies +structuring a new project, adding dependencies, writing and running unit tests, +and more. + + +## Subcommands + +Cargo is a single binary composed of a set of [`clap`][] subcommands. 
All subcommands live in the
+`src/bin/cargo/commands` directory; `src/bin/cargo/main.rs` is the entry point.
+
+Each subcommand, such as `src/bin/cargo/commands/build.rs`, has its own
+command-line interface, similar to Git's: it parses command line options, reads
+the configuration files, discovers the Cargo project in the current directory
+and delegates the actual implementation to one of the functions in
+`src/cargo/ops/mod.rs`. This short file is a good place to find out about most
+of the things that Cargo can do.
+Subcommands are designed to pipe to one another, and custom subcommands make
+Cargo easy to extend and attach tools to.
+
+[`clap`]: https://clap.rs/
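+
+As a rough sketch of this layout (illustrative only; the flag names and the
+`ops::` call below are assumptions for the example, not code from this patch),
+a subcommand module pairs a `clap` definition with an `exec` function that
+delegates to `src/cargo/ops`:
+
+```rust
+// Hypothetical subcommand module in src/bin/cargo/commands/, for illustration.
+use clap::{App, Arg, ArgMatches, SubCommand};
+
+pub fn cli() -> App<'static, 'static> {
+    // Declare the subcommand and its flags (clap 2.x style, per Cargo.toml).
+    SubCommand::with_name("build")
+        .about("Compile a local package and all of its dependencies")
+        .arg(Arg::with_name("release").long("release").help("Build with optimizations"))
+}
+
+pub fn exec(args: &ArgMatches<'_>) -> Result<(), Box<dyn std::error::Error>> {
+    let _release = args.is_present("release");
+    // A real command would now delegate to a function in src/cargo/ops/mod.rs,
+    // e.g. something like `ops::compile(&ws, &compile_opts)` (hypothetical).
+    Ok(())
+}
+```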
+
+
+## Important Data Structures
+
+There are some important data structures which are used throughout
+Cargo.
+
+`Config` is available almost everywhere and holds "global"
+information, such as `CARGO_HOME` or configuration from
+`.cargo/config` files. The `shell` method of `Config` is the entry
+point for printing status messages and other info to the console.
+
+`Workspace` is the description of the workspace for the current
+working directory. Each workspace contains at least one
+`Package`. Each package corresponds to a single `Cargo.toml`, and may
+define several `Target`s, such as the library, binaries, integration
+tests or examples. Targets are crates (each target defines a crate
+root, like `src/lib.rs` or `examples/foo.rs`) and are what is actually
+compiled by `rustc`.
+
+A typical package defines a single library target and several
+auxiliary ones. Packages are the unit of dependency in Cargo: when
+package `foo` depends on package `bar`, each target
+from `foo` needs the library target from `bar`.
+
+`PackageId` is the unique identifier of a (possibly remote)
+package. It consists of three components: name, version and source
+id. A source is the place where the source code for a package comes
+from. Typical sources are crates.io, a git repository or a folder on
+the local hard drive.
+
+`Resolve` is the representation of a directed acyclic graph of package
+dependencies, which uses `PackageId`s for nodes. This is the data
+structure that is saved to the lock file. If there is no lock file,
+Cargo constructs a resolve by finding a graph of packages which
+matches the declared dependency specifications according to semver.
+
+
+## Persistence
+
+Cargo is a non-daemon command line application, which means that all
+the information used by Cargo must be persisted on the hard drive. The
+main sources of information are the `Cargo.toml` and `Cargo.lock` files,
+`.cargo/config` configuration files and the globally shared registry
+of packages downloaded from crates.io, usually located at
+`~/.cargo/registry`. See `src/sources/registry` for the specifics of
+the registry storage format.
+
+
+## Concurrency
+
+Cargo is mostly single threaded. The only concurrency inside a single
+instance of Cargo happens during compilation, when several instances
+of `rustc` are invoked in parallel to build independent
+targets. However, several instances of the Cargo process may run
+concurrently on the system. Cargo guarantees that this
+is always safe by using file locks when accessing potentially shared
+data like the registry or the target directory.
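+
+As a minimal illustration of that file-locking approach (a sketch built on the
+`fs2` dependency declared in this patch's `Cargo.toml`, not Cargo's actual
+locking code; the lock-file name is made up):
+
+```rust
+use std::fs::OpenOptions;
+
+use fs2::FileExt; // cross-platform advisory file locks
+
+fn main() -> std::io::Result<()> {
+    // Hypothetical lock file guarding some shared on-disk state.
+    let lock = OpenOptions::new()
+        .create(true)
+        .write(true)
+        .open("shared-state.lock")?;
+    lock.lock_exclusive()?; // blocks until no other process holds the lock
+    // ... read or modify the shared data here ...
+    lock.unlock()?; // also released automatically when `lock` is dropped
+    Ok(())
+}
+```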
+
+
+## Tests
+
+Cargo has an impressive test suite located in the `tests` folder. Most
+of the tests are integration tests: a project structure with a `Cargo.toml` and
+Rust source code is created in a temporary directory, the `cargo` binary
+is invoked via `std::process::Command`, and then its stdout and stderr are
+verified against the expected output. To simplify testing, several
+macros of the form `[MACRO]` are used in the expected output. For
+example, `[..]` matches any string.
+
+To see the stdout and stderr streams of the subordinate process, add a
+`.stream()` call to the built-up `Execs`:
+
+```rust
+// Before
+p.cargo("run").run();
+
+// After
+p.cargo("run").stream().run();
+```
+
+Alternatively, to build and run a custom version of Cargo, simply run
+`cargo build` and execute `target/debug/cargo`. Note that `+nightly`/`+stable`
+(and variants), being [rustup](https://rustup.rs/) features, won't work when
+executing the locally built Cargo binary directly; you have to instead build
+with `cargo +nightly build` and run with `rustup run` (e.g., `rustup run nightly
+target/debug/cargo ..`), or set the `RUSTC` env var to point to a nightly
+`rustc`.
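+
+For orientation, a new integration test in `tests/testsuite` tends to follow
+the pattern below. This is a hand-written sketch of that pattern, not a test
+copied from the suite; check `tests/testsuite/support/mod.rs` for the real
+helper signatures (`project`, `basic_manifest`, `Execs`) before relying on it:
+
+```rust
+use crate::support::{basic_manifest, project};
+
+#[test]
+fn run_prints_hello() {
+    // Lay out a throwaway project in a temporary directory.
+    let p = project()
+        .file("Cargo.toml", &basic_manifest("foo", "0.0.1"))
+        .file("src/main.rs", r#"fn main() { println!("hello"); }"#)
+        .build();
+
+    // Invoke the real `cargo` binary; `[..]` and friends match variable output.
+    p.cargo("run")
+        .with_stderr(
+            "\
+[COMPILING] foo v0.0.1 ([..])
+[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
+[RUNNING] `target/debug/foo[EXE]`
+",
+        )
+        .with_stdout("hello")
+        .run();
+}
+```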
+
+## Logging
+
+Cargo uses [`env_logger`](https://docs.rs/env_logger/*/env_logger/), so you can
+set the `RUST_LOG` environment variable to get the logs. This is useful both
+for diagnosing bugs in stable Cargo and for local development. Cargo also has
+internal hierarchical profiling infrastructure, which is activated via the
+`CARGO_PROFILE` environment variable:
+
+```
+# Outputs all logs with levels debug and higher
+$ RUST_LOG=debug cargo generate-lockfile
+
+# Don't forget that you can filter by module as well
+$ RUST_LOG=cargo::core::resolver=trace cargo generate-lockfile
+
+# Output first three levels of profiling info
+$ CARGO_PROFILE=3 cargo generate-lockfile
+```
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..929289e19
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,202 @@
+# Contributing to Cargo
+
+Thank you for your interest in contributing to Cargo! Good places to
+start are this document, [ARCHITECTURE.md](ARCHITECTURE.md), which
+describes the high-level structure of Cargo, and [E-easy] bugs on the
+issue tracker.
+
+If you have a general question about Cargo or its internals, feel free to ask
+on [Discord].
+
+## Code of Conduct
+
+All contributors are expected to follow our [Code of Conduct].
+
+## Bug reports
+
+We can't fix what we don't know about, so please report problems liberally. This
+includes problems with understanding the documentation, unhelpful error messages
+and unexpected behavior.
+
+**If you think that you have identified an issue with Cargo that might compromise
+its users' security, please do not open a public issue on GitHub. Instead,
+we ask you to refer to Rust's [security policy].**
+
+Opening an issue is as easy as following [this link][new-issues] and filling out
+the fields. Here's a template that you can use to file an issue, though it's not
+necessary to use it exactly:
+
+    I tried this:
+
+    I expected to see this happen:
+
+    Instead, this happened:
+
+    I'm using <output of `cargo version`>
+
+All three components are important: what you did, what you expected, what
+happened instead. Please use https://gist.github.com/ if your examples run long.
+
+
+## Feature requests
+
+Cargo follows the general Rust model of evolution. All major features go through
+an RFC process. Therefore, before opening a feature request issue, create a
+Pre-RFC thread on the [internals][irlo] forum to get preliminary feedback.
+Implementing a feature as a [custom subcommand][subcommands] is encouraged, as it
+helps demonstrate the demand for the functionality and is a great way to deliver
+a working solution faster, as it can iterate outside of Cargo's release cadence.
+
+## Working on issues
+
+If you're looking for somewhere to start, check out the [E-easy][E-easy] and
+[E-mentor][E-mentor] tags.
+
+Feel free to ask for guidelines on how to tackle a problem on [Discord] or open a
+[new issue][new-issues]. This is especially important if you want to add new
+features to Cargo or make large changes to the already existing code-base.
+Cargo's core developers will do their best to provide help.
+
+If you start working on an already-filed issue, post a comment on that issue to
+let people know that somebody is working on it. Feel free to ask for comments if
+you are unsure about the solution you would like to submit.
+
+While Cargo does make use of some Rust features available only through the
+`nightly` toolchain, it must compile on stable Rust. Code added to Cargo
+is encouraged to make use of the latest stable features of the language and
+`stdlib`.
+
+We use the "fork and pull" model [described here][development-models], where
+contributors push changes to their personal fork and create pull requests to
+bring those changes into the source repository. This process is partly
+automated: pull requests are made against Cargo's master branch, tested and
+reviewed. Once a change is approved to be merged, a friendly bot merges the
+changes into an internal branch, runs the full test suite on that branch
+and only then merges into master. This ensures that Cargo's master branch
+passes the test suite at all times.
+
+Your basic steps to get going:
+
+* Fork Cargo and create a branch from master for the issue you are working on.
+* Please adhere to the code style that you see around the location you are
+working on.
+* [Commit as you go][githelp].
+* Include tests that cover all non-trivial code. The existing tests
+in `tests/` provide templates on how to test Cargo's behavior in a
+sandbox environment. The internal module `testsuite/support` provides a vast amount
+of helpers to minimize boilerplate. See [`testsuite/support/mod.rs`] for an
+introduction to writing tests.
+* Make sure `cargo test` passes. If you do not have the cross-compilers
+installed locally, install them using the instructions returned by
+`cargo test cross_compile::cross_tests` (twice, with `--toolchain nightly`
+added to get the nightly cross target too); alternatively, just
+ignore the cross-compile test failures or disable them by
+using `CFG_DISABLE_CROSS_TESTS=1 cargo test`. Note that some tests are enabled
+only on the `nightly` toolchain. If you can, test both toolchains.
+* All code changes are expected to comply with the formatting suggested by `rustfmt`.
+You can use `rustup component add --toolchain nightly rustfmt-preview` to install `rustfmt` and use
+`rustfmt +nightly --unstable-features --skip-children` on the changed files to automatically format your code.
+* Push your commits to GitHub and create a pull request against Cargo's
+`master` branch.
+
+## Pull requests
+
+After the pull request is made, a friendly bot will automatically assign a
+reviewer; the review process will make sure that the proposed changes are
+sound. Please give the assigned reviewer sufficient time, especially during
+weekends. If you don't get a reply, you may poke the core developers on [Discord].
+
+A merge of Cargo's master branch and your changes is immediately queued
+to be tested after the pull request is made. In case unforeseen
+problems are discovered during this step (e.g., a failure on a platform you
+originally did not develop on), you may ask for guidance. Push additional
+commits to your branch to tackle these problems.
+
+The reviewer might point out changes deemed necessary. Please add them as
+extra commits; this ensures that the reviewer can see what has changed since
+the code was previously reviewed. Large or tricky changes may require several
+passes of review and changes.
+
+Once the reviewer approves your pull request, a friendly bot picks it up
+and [merges][mergequeue] it into Cargo's `master` branch.
+
+## Contributing to the documentation
+
+To contribute to the documentation, all you need to do is change the markdown
+files in the `src/doc` directory. To view the rendered version of changes you
+have made locally, make sure you have `mdbook` installed and run:
+
+```sh
+cd src/doc
+mdbook build
+open book/index.html
+```
+
+To install `mdbook`, run `cargo install mdbook`.
+
+
+## Issue Triage
+
+Sometimes an issue will stay open even though the bug has been fixed. And
+sometimes, the original bug may go stale because something has changed in the
+meantime.
+
+It can be helpful to go through older bug reports and make sure that they are
+still valid. Load up an older issue, double check that it's still true, and
+leave a comment letting us know if it is or is not. The [least recently
+updated sort][lru] is good for finding issues like this.
+
+Contributors with sufficient permissions on the Rust repository can help by
+adding labels to triage issues:
+
+* Yellow, **A**-prefixed labels state which **area** of the project an issue
+  relates to.
+
+* Magenta, **B**-prefixed labels identify bugs which are **blockers**.
+
+* Light purple, **C**-prefixed labels represent the **category** of an issue.
+  In particular, **C-feature-request** marks *proposals* for new features. If
+  an issue is **C-feature-request**, but is not **Feature accepted** or **I-nominated**,
+  then it was not thoroughly discussed, and might need some additional design
+  or perhaps should be implemented as an external subcommand first. Ping
+  @rust-lang/cargo if you want to send a PR for such an issue.
+
+* Dark purple, **Command**-prefixed labels mean the issue has to do with a
+  specific cargo command.
+
+* Green, **E**-prefixed labels explain the level of **experience** or
+  **effort** necessary to fix the issue. [**E-mentor**][E-mentor] issues also
+  have some instructions on how to get started.
+
+* Red, **I**-prefixed labels indicate the **importance** of the issue. The
+  **[I-nominated][]** label indicates that an issue has been nominated for
+  prioritizing at the next triage meeting.
+
+* Purple gray, **O**-prefixed labels are the **operating system** or platform
+  that this issue is specific to.
+
+* Orange, **P**-prefixed labels indicate a bug's **priority**. These labels
+  are only assigned during triage meetings and replace the **[I-nominated][]**
+  label.
+
+* The light orange **relnotes** label marks issues that should be documented in
+  the release notes of the next release.
+ + +[githelp]: https://dont-be-afraid-to-commit.readthedocs.io/en/latest/git/commandlinegit.html +[development-models]: https://help.github.com/articles/about-collaborative-development-models/ +[gist]: https://gist.github.com/ +[new-issues]: https://github.com/rust-lang/cargo/issues/new +[mergequeue]: https://buildbot2.rust-lang.org/homu/queue/cargo +[security policy]: https://www.rust-lang.org/security.html +[lru]: https://github.com/rust-lang/cargo/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-asc +[E-easy]: https://github.com/rust-lang/cargo/labels/E-easy +[E-mentor]: https://github.com/rust-lang/cargo/labels/E-mentor +[I-nominated]: https://github.com/rust-lang/cargo/labels/I-nominated +[Code of Conduct]: https://www.rust-lang.org/conduct.html +[Discord]: https://discordapp.com/invite/rust-lang +[`testsuite/support/mod.rs`]: https://github.com/rust-lang/cargo/blob/master/tests/testsuite/support/mod.rs +[irlo]: https://internals.rust-lang.org/ +[subcommands]: https://doc.rust-lang.org/cargo/reference/external-tools.html#custom-subcommands diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..823838086 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,112 @@ +[package] +name = "cargo" +version = "0.35.0" +edition = "2018" +authors = ["Yehuda Katz ", + "Carl Lerche ", + "Alex Crichton "] +license = "MIT OR Apache-2.0" +homepage = "https://crates.io" +repository = "https://github.com/rust-lang/cargo" +documentation = "https://docs.rs/cargo" +description = """ +Cargo, a package manager for Rust. +""" + +[lib] +name = "cargo" +path = "src/cargo/lib.rs" + +[dependencies] +atty = "0.2" +byteorder = "1.2" +bytesize = "1.0" +crates-io = { path = "src/crates-io", version = "0.23" } +crossbeam-utils = "0.6" +crypto-hash = "0.3.1" +curl = { version = "0.4.19", features = ['http2'] } +curl-sys = "0.4.15" +env_logger = "0.6.0" +pretty_env_logger = { version = "0.3", optional = true } +failure = "0.1.5" +filetime = "0.2" +flate2 = { version = "1.0.3", features = ['zlib'] } +fs2 = "0.4" +git2 = "0.8.0" +git2-curl = "0.9.0" +glob = "0.2.11" +hex = "0.3" +home = "0.3" +ignore = "0.4" +lazy_static = "1.2.0" +jobserver = "0.1.11" +lazycell = "1.2.0" +libc = "0.2" +log = "0.4.6" +libgit2-sys = "0.7.9" +num_cpus = "1.0" +opener = "0.3.0" +rustfix = "0.4.4" +same-file = "1" +semver = { version = "0.9.0", features = ["serde"] } +serde = { version = "1.0.82", features = ['derive'] } +serde_ignored = "0.0.4" +serde_json = { version = "1.0.30", features = ["raw_value"] } +shell-escape = "0.1.4" +tar = { version = "0.4.18", default-features = false } +tempfile = "3.0" +termcolor = "1.0" +toml = "0.4.2" +url = "1.1" +url_serde = "0.2.0" +clap = "2.31.2" +unicode-width = "0.1.5" +openssl = { version = '0.10.11', optional = true } +im-rc = "12.1.0" + +# A noop dependency that changes in the Rust repository, it's a bit of a hack. +# See the `src/tools/rustc-workspace-hack/README.md` file in `rust-lang/rust` +# for more information. 
+rustc-workspace-hack = "1.0.0" + +[target.'cfg(target_os = "macos")'.dependencies] +core-foundation = { version = "0.6.0", features = ["mac_os_10_7_support"] } + +[target.'cfg(windows)'.dependencies] +miow = "0.3.1" +fwdansi = "1" + +[target.'cfg(windows)'.dependencies.winapi] +version = "0.3" +features = [ + "basetsd", + "handleapi", + "jobapi", + "jobapi2", + "memoryapi", + "minwindef", + "ntdef", + "ntstatus", + "processenv", + "processthreadsapi", + "psapi", + "synchapi", + "winerror", + "winbase", + "wincon", + "winnt", +] + +[dev-dependencies] +bufstream = "0.1" +proptest = "0.8.7" + +[[bin]] +name = "cargo" +test = false +doc = false + +[features] +deny-warnings = [] +vendored-openssl = ['openssl/vendored'] +pretty-env-logger = ['pretty_env_logger'] diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/LICENSE-THIRD-PARTY b/LICENSE-THIRD-PARTY new file mode 100644 index 000000000..c9897b96f --- /dev/null +++ b/LICENSE-THIRD-PARTY @@ -0,0 +1,1272 @@ +The Cargo source code itself does not bundle any third party libraries, but it +depends on a number of libraries which carry their own copyright notices and +license terms. These libraries are normally all linked static into the binary +distributions of Cargo: + +* OpenSSL - http://www.openssl.org/source/license.html + + Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + 3. 
All advertising materials mentioning features or use of this + software must display the following acknowledgment: + "This product includes software developed by the OpenSSL Project + for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + + 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + endorse or promote products derived from this software without + prior written permission. For written permission, please contact + openssl-core@openssl.org. + + 5. Products derived from this software may not be called "OpenSSL" + nor may "OpenSSL" appear in their names without prior written + permission of the OpenSSL Project. + + 6. Redistributions of any form whatsoever must retain the following + acknowledgment: + "This product includes software developed by the OpenSSL Project + for use in the OpenSSL Toolkit (http://www.openssl.org/)" + + THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + OF THE POSSIBILITY OF SUCH DAMAGE. + ==================================================================== + + This product includes cryptographic software written by Eric Young + (eay@cryptsoft.com). This product includes software written by Tim + Hudson (tjh@cryptsoft.com). + + --- + + Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + All rights reserved. + + This package is an SSL implementation written + by Eric Young (eay@cryptsoft.com). + The implementation was written so as to conform with Netscapes SSL. + + This library is free for commercial and non-commercial use as long as + the following conditions are aheared to. The following conditions + apply to all code found in this distribution, be it the RC4, RSA, + lhash, DES, etc., code; not just the SSL code. The SSL documentation + included with this distribution is covered by the same copyright terms + except that the holder is Tim Hudson (tjh@cryptsoft.com). + + Copyright remains Eric Young's, and as such any Copyright notices in + the code are not to be removed. + If this package is used in a product, Eric Young should be given attribution + as the author of the parts of the library used. + This can be in the form of a textual message at program startup or + in documentation (online or textual) provided with the package. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. 
All advertising materials mentioning features or use of this software + must display the following acknowledgement: + "This product includes cryptographic software written by + Eric Young (eay@cryptsoft.com)" + The word 'cryptographic' can be left out if the rouines from the library + being used are not cryptographic related :-). + 4. If you include any Windows specific code (or a derivative thereof) from + the apps directory (application code) you must include an acknowledgement: + "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + + THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. + + The licence and distribution terms for any publically available version or + derivative of this code cannot be changed. i.e. this code cannot simply be + copied and put under another distribution licence + [including the GNU Public Licence.] + +* libgit2 - https://github.com/libgit2/libgit2/blob/master/COPYING + + libgit2 is Copyright (C) the libgit2 contributors, + unless otherwise stated. See the AUTHORS file for details. + + Note that the only valid version of the GPL as far as this project + is concerned is _this_ particular version of the license (ie v2, not + v2.2 or v3.x or whatever), unless explicitly otherwise stated. + + ---------------------------------------------------------------------- + + LINKING EXCEPTION + + In addition to the permissions in the GNU General Public License, + the authors give you unlimited permission to link the compiled + version of this library into combinations with other programs, + and to distribute those combinations without any restriction + coming from the use of this file. (The General Public License + restrictions do apply in other respects; for example, they cover + modification of the file, and distribution when not linked into + a combined executable.) + + ---------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + License is intended to guarantee your freedom to share and change free + software--to make sure the software is free for all its users. This + General Public License applies to most of the Free Software + Foundation's software and to any other program whose authors commit to + using it. (Some other Free Software Foundation software is covered by + the GNU Library General Public License instead.) You can apply it to + your programs, too. 
+ + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + this service if you wish), that you receive source code or can get it + if you want it, that you can change the software or use pieces of it + in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid + anyone to deny you these rights or to ask you to surrender the rights. + These restrictions translate to certain responsibilities for you if you + distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must give the recipients all the rights that + you have. You must make sure that they, too, receive or can get the + source code. And you must show them these terms so they know their + rights. + + We protect your rights with two steps: (1) copyright the software, and + (2) offer you this license which gives you legal permission to copy, + distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain + that everyone understands that there is no warranty for this free + software. If the software is modified by someone else and passed on, we + want its recipients to know that what they have is not the original, so + that any problems introduced by others will not reflect on the original + authors' reputations. + + Finally, any free program is threatened constantly by software + patents. We wish to avoid the danger that redistributors of a free + program will individually obtain patent licenses, in effect making the + program proprietary. To prevent this, we have made it clear that any + patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and + modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains + a notice placed by the copyright holder saying it may be distributed + under the terms of this General Public License. The "Program", below, + refers to any such program or work, and a "work based on the Program" + means either the Program or any derivative work under copyright law: + that is to say, a work containing the Program or a portion of it, + either verbatim or with modifications and/or translated into another + language. (Hereinafter, translation is included without limitation in + the term "modification".) Each licensee is addressed as "you". + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running the Program is not restricted, and the output from the Program + is covered only if its contents constitute a work based on the + Program (independent of having been made by running the Program). + Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's + source code as you receive it, in any medium, provided that you + conspicuously and appropriately publish on each copy an appropriate + copyright notice and disclaimer of warranty; keep intact all the + notices that refer to this License and to the absence of any warranty; + and give any other recipients of the Program a copy of this License + along with the Program. + + You may charge a fee for the physical act of transferring a copy, and + you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion + of it, thus forming a work based on the Program, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Program, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Program, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Program. + + In addition, mere aggregation of another work not based on the Program + with the Program (or with a work based on the Program) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, + under Section 2) in object code or executable form under the terms of + Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + + The source code for a work means the preferred form of the work for + making modifications to it. For an executable work, complete source + code means all the source code for all modules it contains, plus any + associated interface definition files, plus the scripts used to + control compilation and installation of the executable. However, as a + special exception, the source code distributed need not include + anything that is normally distributed (in either source or binary + form) with the major components (compiler, kernel, and so on) of the + operating system on which the executable runs, unless that component + itself accompanies the executable. + + If distribution of executable or object code is made by offering + access to copy from a designated place, then offering equivalent + access to copy the source code from the same place counts as + distribution of the source code, even though third parties are not + compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense or distribute the Program is + void, and will automatically terminate your rights under this License. + However, parties who have received copies, or rights, from you under + this License will not have their licenses terminated so long as such + parties remain in full compliance. + + 5. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Program or its derivative works. These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Program (or any work based on the + Program), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the + Program), the recipient automatically receives a license from the + original licensor to copy, distribute or modify the Program subject to + these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties to + this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Program at all. For example, if a patent + license would not permit royalty-free redistribution of the Program by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Program. + + If any portion of this section is held invalid or unenforceable under + any particular circumstance, the balance of the section is intended to + apply and the section as a whole is intended to apply in other + circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system, which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Program under this License + may add an explicit geographical distribution limitation excluding + those countries, so that distribution is permitted only in or among + countries not thus excluded. In such case, this License incorporates + the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions + of the General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the Program + specifies a version number of this License which applies to it and "any + later version", you have the option of following the terms and conditions + either of that version or of any later version published by the Free + Software Foundation. If the Program does not specify a version number of + this License, you may choose any version ever published by the Free Software + Foundation. + + 10. If you wish to incorporate parts of the Program into other free + programs whose distribution conditions are different, write to the author + to ask for permission. For software which is copyrighted by the Free + Software Foundation, write to the Free Software Foundation; we sometimes + make exceptions for this. Our decision will be guided by the two goals + of preserving the free status of all derivatives of our free software and + of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+  FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+  OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+  PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+  OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+  TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+  PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+  REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+  WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+  REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+  INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+  OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+  TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+  YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+  PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGES.
+
+  END OF TERMS AND CONDITIONS
+
+  How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+  possible use to the public, the best way to achieve this is to make it
+  free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+  to attach them to the start of each source file to most effectively
+  convey the exclusion of warranty; and each file should have at least
+  the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+  Also add information on how to contact you by electronic and paper mail.
+
+  If the program is interactive, make it output a short notice like this
+  when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+  The hypothetical commands `show w' and `show c' should show the appropriate
+  parts of the General Public License. Of course, the commands you use may
+  be called something other than `show w' and `show c'; they could even be
+  mouse-clicks or menu items--whatever suits your program.
+
+  You should also get your employer (if you work as a programmer) or your
+  school, if any, to sign a "copyright disclaimer" for the program, if
+  necessary. Here is a sample; alter the names:
+
+    Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+    `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+    <signature of Ty Coon>, 1 April 1989
+    Ty Coon, President of Vice
+
+  This General Public License does not permit incorporating your program into
+  proprietary programs. If your program is a subroutine library, you may
+  consider it more useful to permit linking proprietary applications with the
+  library. If this is what you want to do, use the GNU Library General
+  Public License instead of this License.
+
+  ----------------------------------------------------------------------
+
+  The bundled ZLib code is licensed under the ZLib license:
+
+  Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler
+
+  This software is provided 'as-is', without any express or implied
+  warranty. In no event will the authors be held liable for any damages
+  arising from the use of this software.
+
+  Permission is granted to anyone to use this software for any purpose,
+  including commercial applications, and to alter it and redistribute it
+  freely, subject to the following restrictions:
+
+  1. The origin of this software must not be misrepresented; you must not
+     claim that you wrote the original software. If you use this software
+     in a product, an acknowledgment in the product documentation would be
+     appreciated but is not required.
+  2. Altered source versions must be plainly marked as such, and must not be
+     misrepresented as being the original software.
+  3. This notice may not be removed or altered from any source distribution.
+
+  Jean-loup Gailly        Mark Adler
+  jloup@gzip.org          madler@alumni.caltech.edu
+
+  ----------------------------------------------------------------------
+
+  The Clar framework is licensed under the MIT license:
+
+  Copyright (C) 2011 by Vicent Marti
+
+  Permission is hereby granted, free of charge, to any person obtaining a copy
+  of this software and associated documentation files (the "Software"), to deal
+  in the Software without restriction, including without limitation the rights
+  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+  copies of the Software, and to permit persons to whom the Software is
+  furnished to do so, subject to the following conditions:
+
+  The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+  THE SOFTWARE.
+
+  ----------------------------------------------------------------------
+
+  The regex library (deps/regex/) is licensed under the GNU LGPL
+
+                  GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 2.1, February 1999
+
+  Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  Everyone is permitted to copy and distribute verbatim copies
+  of this license document, but changing it is not allowed.
+
+  [This is the first released version of the Lesser GPL. It also counts
+  as the successor of the GNU Library Public License, version 2, hence
+  the version number 2.1.]
+ + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + Licenses are intended to guarantee your freedom to share and change + free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some + specially designated software packages--typically libraries--of the + Free Software Foundation and other authors who decide to use it. You + can use it too, but we suggest you first think carefully about whether + this license or the ordinary General Public License is the better + strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, + not price. Our General Public Licenses are designed to make sure that + you have the freedom to distribute copies of free software (and charge + for this service if you wish); that you receive source code or can get + it if you want it; that you can change the software and use pieces of + it in new free programs; and that you are informed that you can do + these things. + + To protect your rights, we need to make restrictions that forbid + distributors to deny you these rights or to ask you to surrender these + rights. These restrictions translate to certain responsibilities for + you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis + or for a fee, you must give the recipients all the rights that we gave + you. You must make sure that they, too, receive or can get the source + code. If you link other code with the library, you must provide + complete object files to the recipients, so that they can relink them + with the library after making changes to the library and recompiling + it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the + library, and (2) we offer you this license, which gives you legal + permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that + there is no warranty for the free library. Also, if the library is + modified by someone else and passed on, the recipients should know + that what they have is not the original version, so that the original + author's reputation will not be affected by problems that might be + introduced by others. + + Finally, software patents pose a constant threat to the existence of + any free program. We wish to make sure that a company cannot + effectively restrict the users of a free program by obtaining a + restrictive license from a patent holder. Therefore, we insist that + any patent license obtained for a version of the library must be + consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the + ordinary GNU General Public License. This license, the GNU Lesser + General Public License, applies to certain designated libraries, and + is quite different from the ordinary General Public License. We use + this license for certain libraries in order to permit linking those + libraries into non-free programs. + + When a program is linked with a library, whether statically or using + a shared library, the combination of the two is legally speaking a + combined work, a derivative of the original library. 
The ordinary + General Public License therefore permits such linking only if the + entire combination fits its criteria of freedom. The Lesser General + Public License permits more lax criteria for linking other code with + the library. + + We call this license the "Lesser" General Public License because it + does Less to protect the user's freedom than the ordinary General + Public License. It also provides other free software developers Less + of an advantage over competing non-free programs. These disadvantages + are the reason we use the ordinary General Public License for many + libraries. However, the Lesser license provides advantages in certain + special circumstances. + + For example, on rare occasions, there may be a special need to + encourage the widest possible use of a certain library, so that it becomes + a de-facto standard. To achieve this, non-free programs must be + allowed to use the library. A more frequent case is that a free + library does the same job as widely used non-free libraries. In this + case, there is little to gain by limiting the free library to free + software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free + programs enables a greater number of people to use a large body of + free software. For example, permission to use the GNU C Library in + non-free programs enables many more people to use the whole GNU + operating system, as well as its variant, the GNU/Linux operating + system. + + Although the Lesser General Public License is Less protective of the + users' freedom, it does ensure that the user of a program that is + linked with the Library has the freedom and the wherewithal to run + that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and + modification follow. Pay close attention to the difference between a + "work based on the library" and a "work that uses the library". The + former contains code derived from the library, whereas the latter must + be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other + program which contains a notice placed by the copyright holder or + other authorized party saying it may be distributed under the terms of + this Lesser General Public License (also called "this License"). + Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data + prepared so as to be conveniently linked with application programs + (which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work + which has been distributed under these terms. A "work based on the + Library" means either the Library or any derivative work under + copyright law: that is to say, a work containing the Library or a + portion of it, either verbatim or with modifications and/or translated + straightforwardly into another language. (Hereinafter, translation is + included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for + making modifications to it. For a library, complete source code means + all the source code for all modules it contains, plus any associated + interface definition files, plus the scripts used to control compilation + and installation of the library. 
+ + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running a program using the Library is not restricted, and output from + such a program is covered only if its contents constitute a work based + on the Library (independent of the use of the Library in a tool for + writing it). Whether that is true depends on what the Library does + and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's + complete source code as you receive it, in any medium, provided that + you conspicuously and appropriately publish on each copy an + appropriate copyright notice and disclaimer of warranty; keep intact + all the notices that refer to this License and to the absence of any + warranty; and distribute a copy of this License along with the + Library. + + You may charge a fee for the physical act of transferring a copy, + and you may at your option offer warranty protection in exchange for a + fee. + + 2. You may modify your copy or copies of the Library or any portion + of it, thus forming a work based on the Library, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Library, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Library, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote + it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Library. 
+ + In addition, mere aggregation of another work not based on the Library + with the Library (or with a work based on the Library) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public + License instead of this License to a given copy of the Library. To do + this, you must alter all the notices that refer to this License, so + that they refer to the ordinary GNU General Public License, version 2, + instead of to this License. (If a newer version than version 2 of the + ordinary GNU General Public License has appeared, then you can specify + that version instead if you wish.) Do not make any other change in + these notices. + + Once this change is made in a given copy, it is irreversible for + that copy, so the ordinary GNU General Public License applies to all + subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of + the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or + derivative of it, under Section 2) in object code or executable form + under the terms of Sections 1 and 2 above provided that you accompany + it with the complete corresponding machine-readable source code, which + must be distributed under the terms of Sections 1 and 2 above on a + medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy + from a designated place, then offering equivalent access to copy the + source code from the same place satisfies the requirement to + distribute the source code, even though third parties are not + compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the + Library, but is designed to work with the Library by being compiled or + linked with it, is called a "work that uses the Library". Such a + work, in isolation, is not a derivative work of the Library, and + therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library + creates an executable that is a derivative of the Library (because it + contains portions of the Library), rather than a "work that uses the + library". The executable is therefore covered by this License. + Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file + that is part of the Library, the object code for the work may be a + derivative work of the Library even though the source code is not. + Whether this is true is especially significant if the work can be + linked without the Library, or if the work is itself a library. The + threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data + structure layouts and accessors, and small macros and small inline + functions (ten lines or less in length), then the use of the object + file is unrestricted, regardless of whether it is legally a derivative + work. (Executables containing this object code plus portions of the + Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may + distribute the object code for the work under the terms of Section 6. 
+ Any executables containing that work also fall under Section 6, + whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or + link a "work that uses the Library" with the Library to produce a + work containing portions of the Library, and distribute that work + under terms of your choice, provided that the terms permit + modification of the work for the customer's own use and reverse + engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the + Library is used in it and that the Library and its use are covered by + this License. You must supply a copy of this License. If the work + during execution displays copyright notices, you must include the + copyright notice for the Library among them, as well as a reference + directing the user to the copy of this License. Also, you must do one + of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the + Library" must include any data and utility programs needed for + reproducing the executable from it. However, as a special exception, + the materials to be distributed need not include anything that is + normally distributed (in either source or binary form) with the major + components (compiler, kernel, and so on) of the operating system on + which the executable runs, unless that component itself accompanies + the executable. + + It may happen that this requirement contradicts the license + restrictions of other proprietary libraries that do not normally + accompany the operating system. Such a contradiction means you cannot + use both them and the Library together in an executable that you + distribute. + + 7. 
You may place library facilities that are a work based on the + Library side-by-side in a single library together with other library + facilities not covered by this License, and distribute such a combined + library, provided that the separate distribution of the work based on + the Library and of the other library facilities is otherwise + permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute + the Library except as expressly provided under this License. Any + attempt otherwise to copy, modify, sublicense, link with, or + distribute the Library is void, and will automatically terminate your + rights under this License. However, parties who have received copies, + or rights, from you under this License will not have their licenses + terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Library or its derivative works. These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Library (or any work based on the + Library), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the + Library), the recipient automatically receives a license from the + original licensor to copy, distribute, link with or modify the Library + subject to these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties with + this License. + + 11. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Library at all. For example, if a patent + license would not permit royalty-free redistribution of the Library by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Library. + + If any portion of this section is held invalid or unenforceable under any + particular circumstance, the balance of the section is intended to apply, + and the section as a whole is intended to apply in other circumstances. 
+ + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Library under this License may add + an explicit geographical distribution limitation excluding those countries, + so that distribution is permitted only in or among countries not thus + excluded. In such case, this License incorporates the limitation as if + written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new + versions of the Lesser General Public License from time to time. + Such new versions will be similar in spirit to the present version, + but may differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the Library + specifies a version number of this License which applies to it and + "any later version", you have the option of following the terms and + conditions either of that version or of any later version published by + the Free Software Foundation. If the Library does not specify a + license version number, you may choose any version ever published by + the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free + programs whose distribution conditions are incompatible with these, + write to the author to ask for permission. For software which is + copyrighted by the Free Software Foundation, write to the Free + Software Foundation; we sometimes make exceptions for this. Our + decision will be guided by the two goals of preserving the free status + of all derivatives of our free software and of promoting the sharing + and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO + WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. + EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR + OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY + KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE + LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME + THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+----------------------------------------------------------------------
+
+* libssh2 - http://www.libssh2.org/license.html
+
+  Copyright (c) 2004-2007 Sara Golemon
+  Copyright (c) 2005,2006 Mikhail Gusarov
+  Copyright (c) 2006-2007 The Written Word, Inc.
+  Copyright (c) 2007 Eli Fant
+  Copyright (c) 2009 Daniel Stenberg
+  Copyright (C) 2008, 2009 Simon Josefsson
+  All rights reserved.
+
+  Redistribution and use in source and binary forms,
+  with or without modification, are permitted provided
+  that the following conditions are met:
+
+    Redistributions of source code must retain the above
+    copyright notice, this list of conditions and the
+    following disclaimer.
+
+    Redistributions in binary form must reproduce the above
+    copyright notice, this list of conditions and the following
+    disclaimer in the documentation and/or other materials
+    provided with the distribution.
+ + Neither the name of the copyright holder nor the names + of any other contributors may be used to endorse or + promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + OF SUCH DAMAGE. + +* libcurl - http://curl.haxx.se/docs/copyright.html + + COPYRIGHT AND PERMISSION NOTICE + + Copyright (c) 1996 - 2014, Daniel Stenberg, daniel@haxx.se. + + All rights reserved. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name of a copyright holder shall not + be used in advertising or otherwise to promote the sale, use or other + dealings in this Software without prior written authorization of the + copyright holder. + +* flate2-rs - https://github.com/alexcrichton/flate2-rs/blob/master/LICENSE-MIT +* link-config - https://github.com/alexcrichton/link-config/blob/master/LICENSE-MIT +* openssl-static-sys - https://github.com/alexcrichton/openssl-static-sys/blob/master/LICENSE-MIT +* toml-rs - https://github.com/alexcrichton/toml-rs/blob/master/LICENSE-MIT +* libssh2-static-sys - https://github.com/alexcrichton/libssh2-static-sys/blob/master/LICENSE-MIT +* git2-rs - https://github.com/alexcrichton/git2-rs/blob/master/LICENSE-MIT +* tar-rs - https://github.com/alexcrichton/tar-rs/blob/master/LICENSE-MIT + + Copyright (c) 2014 Alex Crichton + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* glob - https://github.com/rust-lang/glob/blob/master/LICENSE-MIT +* semver - https://github.com/rust-lang/semver/blob/master/LICENSE-MIT + + Copyright (c) 2014 The Rust Project Developers + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* rust-url - https://github.com/servo/rust-url/blob/master/LICENSE-MIT + + Copyright (c) 2006-2009 Graydon Hoare + Copyright (c) 2009-2013 Mozilla Foundation + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* rust-encoding - https://github.com/lifthrasiir/rust-encoding/blob/master/LICENSE.txt + + The MIT License (MIT) + + Copyright (c) 2013, Kang Seonghoon. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +* curl-rust - https://github.com/carllerche/curl-rust/blob/master/LICENSE + + Copyright (c) 2014 Carl Lerche + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +* docopt.rs - https://github.com/docopt/docopt.rs/blob/master/UNLICENSE + + This is free and unencumbered software released into the public domain. + + Anyone is free to copy, modify, publish, use, compile, sell, or + distribute this software, either in source code form or as a compiled + binary, for any purpose, commercial or non-commercial, and by any + means. + + In jurisdictions that recognize copyright laws, the author or authors + of this software dedicate any and all copyright interest in the + software to the public domain. We make this dedication for the benefit + of the public at large and to the detriment of our heirs and + successors. We intend this dedication to be an overt act of + relinquishment in perpetuity of all present and future rights to this + software under copyright law. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
+
+  For more information, please refer to <http://unlicense.org/>
+
diff --git a/README.md b/README.md
new file mode 100644
index 000000000..d38aefa23
--- /dev/null
+++ b/README.md
@@ -0,0 +1,90 @@
+# Cargo
+
+Cargo downloads your Rust project’s dependencies and compiles your project.
+
+Learn more at https://doc.rust-lang.org/cargo/
+
+## Code Status
+
+[![Build Status](https://travis-ci.com/rust-lang/cargo.svg?branch=master)](https://travis-ci.com/rust-lang/cargo)
+[![Build Status](https://ci.appveyor.com/api/projects/status/github/rust-lang/cargo?branch=master&svg=true)](https://ci.appveyor.com/project/rust-lang-libs/cargo)
+
+Code documentation: https://docs.rs/cargo/
+
+## Installing Cargo
+
+Cargo is distributed by default with Rust, so if you've got `rustc` installed
+locally you probably also have `cargo` installed locally.
+
+## Compiling from Source
+
+Cargo requires the following tools and packages to build:
+
+* `git`
+* `python`
+* `curl` (on Unix)
+* OpenSSL headers (only for Unix, this is the `libssl-dev` package on Ubuntu)
+* `cargo` and `rustc`
+
+First, you'll want to check out this repository:
+
+```
+git clone https://github.com/rust-lang/cargo
+cd cargo
+```
+
+With `cargo` already installed, you can simply run:
+
+```
+cargo build --release
+```
+
+## Adding new subcommands to Cargo
+
+Cargo is designed to be extensible with new subcommands without having to modify
+Cargo itself. See [the Wiki page][third-party-subcommands] for more details and
+a list of known community-developed subcommands.
+
+[third-party-subcommands]: https://github.com/rust-lang/cargo/wiki/Third-party-cargo-subcommands
+
+
+## Releases
+
+High level release notes are available as part of [Rust's release notes][rel].
+Cargo releases coincide with Rust releases.
+
+[rel]: https://github.com/rust-lang/rust/blob/master/RELEASES.md
+
+## Reporting issues
+
+Found a bug? We'd love to know about it!
+
+Please report all issues on the GitHub [issue tracker][issues].
+
+[issues]: https://github.com/rust-lang/cargo/issues
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md). You may also find the architecture
+documentation useful ([ARCHITECTURE.md](ARCHITECTURE.md)).
+
+## License
+
+Cargo is primarily distributed under the terms of both the MIT license
+and the Apache License (Version 2.0).
+
+See LICENSE-APACHE and LICENSE-MIT for details.
+
+### Third party software
+
+This product includes software developed by the OpenSSL Project
+for use in the OpenSSL Toolkit (http://www.openssl.org/).
+
+In binary form, this product includes software that is licensed under the
+terms of the GNU General Public License, version 2, with a linking exception,
+which can be obtained from the [upstream repository][1].
+
+See LICENSE-THIRD-PARTY for details.
+ +[1]: https://github.com/libgit2/libgit2 + diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 000000000..5d2ec309b --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,26 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + OTHER_TARGET: i686-pc-windows-msvc + +install: + - if NOT defined APPVEYOR_PULL_REQUEST_NUMBER if "%APPVEYOR_REPO_BRANCH%" == "master" appveyor exit + - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host x86_64-pc-windows-msvc --default-toolchain nightly + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + - if defined MINIMAL_VERSIONS rustup toolchain install 1.31.0 + - if defined OTHER_TARGET rustup target add %OTHER_TARGET% + - rustc -V + - cargo -V + - git submodule update --init + +clone_depth: 1 + +build: false + +test_script: + # we don't have ci time to run the full `cargo test` with `minimal-versions` like + # - if defined MINIMAL_VERSIONS cargo +nightly generate-lockfile -Z minimal-versions && cargo +stable test + # so we just run `cargo check --tests` like + - if defined MINIMAL_VERSIONS cargo +nightly generate-lockfile -Z minimal-versions && cargo +1.31.0 check --tests --features=deny-warnings + - if NOT defined MINIMAL_VERSIONS cargo test --features=deny-warnings diff --git a/src/bin/cargo/cli.rs b/src/bin/cargo/cli.rs new file mode 100644 index 000000000..3b2f2f99a --- /dev/null +++ b/src/bin/cargo/cli.rs @@ -0,0 +1,242 @@ +use clap; + +use clap::{AppSettings, Arg, ArgMatches}; + +use cargo::{self, CliResult, Config}; + +use super::commands; +use super::list_commands; +use crate::command_prelude::*; + +pub fn main(config: &mut Config) -> CliResult { + let args = match cli().get_matches_safe() { + Ok(args) => args, + Err(e) => { + if e.kind == clap::ErrorKind::UnrecognizedSubcommand { + // An unrecognized subcommand might be an external subcommand. 
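+                // `e.info` carries the unrecognized name; try it as an external
+                // `cargo-<cmd>` binary (here only to surface its `--help` text),
+                // and fall back to the original clap error if that fails.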
+                let cmd = &e.info.as_ref().unwrap()[0].to_owned();
+                return super::execute_external_subcommand(config, cmd, &[cmd, "--help"])
+                    .map_err(|_| e.into());
+            } else {
+                return Err(e)?;
+            }
+        }
+    };
+
+    if args.value_of("unstable-features") == Some("help") {
+        println!(
+            "
+Available unstable (nightly-only) flags:
+
+    -Z avoid-dev-deps   -- Avoid installing dev-dependencies if possible
+    -Z minimal-versions -- Install minimal dependency versions instead of maximum
+    -Z no-index-update  -- Do not update the registry, avoids a network request for benchmarking
+    -Z offline          -- Offline mode that does not perform network requests
+    -Z unstable-options -- Allow the usage of unstable options such as --registry
+    -Z config-profile   -- Read profiles from .cargo/config files
+
+Run with 'cargo -Z [FLAG] [SUBCOMMAND]'"
+        );
+        return Ok(());
+    }
+
+    let is_verbose = args.occurrences_of("verbose") > 0;
+    if args.is_present("version") {
+        let version = get_version_string(is_verbose);
+        print!("{}", version);
+        return Ok(());
+    }
+
+    if let Some(ref code) = args.value_of("explain") {
+        let mut process = config.rustc(None)?.process();
+        process.arg("--explain").arg(code).exec()?;
+        return Ok(());
+    }
+
+    if args.is_present("list") {
+        println!("Installed Commands:");
+        for command in list_commands(config) {
+            match command {
+                CommandInfo::BuiltIn { name, about } => {
+                    let summary = about.unwrap_or_default();
+                    let summary = summary.lines().next().unwrap_or(&summary); // display only the first line
+                    println!("    {:<20} {}", name, summary)
+                }
+                CommandInfo::External { name, path } => {
+                    if is_verbose {
+                        println!("    {:<20} {}", name, path.display())
+                    } else {
+                        println!("    {}", name)
+                    }
+                }
+            }
+        }
+        return Ok(());
+    }
+
+    let args = expand_aliases(config, args)?;
+
+    execute_subcommand(config, &args)
+}
+
+pub fn get_version_string(is_verbose: bool) -> String {
+    let version = cargo::version();
+    let mut version_string = version.to_string();
+    version_string.push_str("\n");
+    if is_verbose {
+        version_string.push_str(&format!(
+            "release: {}.{}.{}\n",
+            version.major, version.minor, version.patch
+        ));
+        if let Some(ref cfg) = version.cfg_info {
+            if let Some(ref ci) = cfg.commit_info {
+                version_string.push_str(&format!("commit-hash: {}\n", ci.commit_hash));
+                version_string.push_str(&format!("commit-date: {}\n", ci.commit_date));
+            }
+        }
+    }
+    version_string
+}
+
+fn expand_aliases(
+    config: &mut Config,
+    args: ArgMatches<'static>,
+) -> Result<ArgMatches<'static>, CliError> {
+    if let (cmd, Some(args)) = args.subcommand() {
+        match (
+            commands::builtin_exec(cmd),
+            super::aliased_command(config, cmd)?,
+        ) {
+            (Some(_), Some(_)) => {
+                // User alias conflicts with a built-in subcommand
+                config.shell().warn(format!(
+                    "user-defined alias `{}` is ignored, because it is shadowed by a built-in command",
+                    cmd,
+                ))?;
+            }
+            (_, Some(mut alias)) => {
+                alias.extend(
+                    args.values_of("")
+                        .unwrap_or_default()
+                        .map(|s| s.to_string()),
+                );
+                let args = cli()
+                    .setting(AppSettings::NoBinaryName)
+                    .get_matches_from_safe(alias)?;
+                return expand_aliases(config, args);
+            }
+            (_, None) => {}
+        }
+    };
+
+    Ok(args)
+}
+
+fn execute_subcommand(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let (cmd, subcommand_args) = match args.subcommand() {
+        (cmd, Some(args)) => (cmd, args),
+        _ => {
+            cli().print_help()?;
+            return Ok(());
+        }
+    };
+
+    let arg_target_dir = &subcommand_args.value_of_path("target-dir", config);
+
+    config.configure(
+        args.occurrences_of("verbose") as u32,
+        if args.is_present("quiet") {
+            Some(true)
+        } else {
+            None
+        },
+        &args.value_of("color").map(|s| s.to_string()),
+        args.is_present("frozen"),
+        args.is_present("locked"),
+        arg_target_dir,
+        &args
+            .values_of_lossy("unstable-features")
+            .unwrap_or_default(),
+    )?;
+
+    if let Some(exec) = commands::builtin_exec(cmd) {
+        return exec(config, subcommand_args);
+    }
+
+    let mut ext_args: Vec<&str> = vec![cmd];
+    ext_args.extend(subcommand_args.values_of("").unwrap_or_default());
+    super::execute_external_subcommand(config, cmd, &ext_args)
+}
+
+fn cli() -> App {
+    App::new("cargo")
+        .settings(&[
+            AppSettings::UnifiedHelpMessage,
+            AppSettings::DeriveDisplayOrder,
+            AppSettings::VersionlessSubcommands,
+            AppSettings::AllowExternalSubcommands,
+        ])
+        .about("")
+        .template(
+            "\
+Rust's package manager
+
+USAGE:
+    {usage}
+
+OPTIONS:
+{unified}
+
+Some common cargo commands are (see all commands with --list):
+    build       Compile the current package
+    check       Analyze the current package and report errors, but don't build object files
+    clean       Remove the target directory
+    doc         Build this package's and its dependencies' documentation
+    new         Create a new cargo package
+    init        Create a new cargo package in an existing directory
+    run         Run a binary or example of the local package
+    test        Run the tests
+    bench       Run the benchmarks
+    update      Update dependencies listed in Cargo.lock
+    search      Search registry for crates
+    publish     Package and upload this package to the registry
+    install     Install a Rust binary. Default location is $HOME/.cargo/bin
+    uninstall   Uninstall a Rust binary
+
+See 'cargo help <command>' for more information on a specific command.\n",
+        )
+        .arg(opt("version", "Print version info and exit").short("V"))
+        .arg(opt("list", "List installed commands"))
+        .arg(opt("explain", "Run `rustc --explain CODE`").value_name("CODE"))
+        .arg(
+            opt(
+                "verbose",
+                "Use verbose output (-vv very verbose/build.rs output)",
+            )
+            .short("v")
+            .multiple(true)
+            .global(true),
+        )
+        .arg(
+            opt("quiet", "No output printed to stdout")
+                .short("q")
+                .global(true),
+        )
+        .arg(
+            opt("color", "Coloring: auto, always, never")
+                .value_name("WHEN")
+                .global(true),
+        )
+        .arg(opt("frozen", "Require Cargo.lock and cache are up to date").global(true))
+        .arg(opt("locked", "Require Cargo.lock is up to date").global(true))
+        .arg(
+            Arg::with_name("unstable-features")
+                .help("Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details")
+                .short("Z")
+                .value_name("FLAG")
+                .multiple(true)
+                .number_of_values(1)
+                .global(true),
+        )
+        .subcommands(commands::builtin())
+}
diff --git a/src/bin/cargo/commands/bench.rs b/src/bin/cargo/commands/bench.rs
new file mode 100644
index 000000000..84681e6bd
--- /dev/null
+++ b/src/bin/cargo/commands/bench.rs
@@ -0,0 +1,105 @@
+use crate::command_prelude::*;
+
+use cargo::ops::{self, TestOptions};
+
+pub fn cli() -> App {
+    subcommand("bench")
+        .setting(AppSettings::TrailingVarArg)
+        .about("Execute all benchmarks of a local package")
+        .arg(
+            Arg::with_name("BENCHNAME")
+                .help("If specified, only run benches containing this string in their names"),
+        )
+        .arg(
+            Arg::with_name("args")
+                .help("Arguments for the bench binary")
+                .multiple(true)
+                .last(true),
+        )
+        .arg_targets_all(
+            "Benchmark only this package's library",
+            "Benchmark only the specified binary",
+            "Benchmark all binaries",
+            "Benchmark only the specified example",
+            "Benchmark all examples",
+            "Benchmark only the specified test target",
+            "Benchmark all tests",
+            "Benchmark only the specified bench target",
+            "Benchmark all benches",
+            "Benchmark all 
targets", + ) + .arg(opt("no-run", "Compile, but don't run benchmarks")) + .arg_package_spec( + "Package to run benchmarks for", + "Benchmark all packages in the workspace", + "Exclude packages from the benchmark", + ) + .arg_jobs() + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .arg(opt( + "no-fail-fast", + "Run all benchmarks regardless of failure", + )) + .after_help( + "\ +The benchmark filtering argument BENCHNAME and all the arguments following the +two dashes (`--`) are passed to the benchmark binaries and thus to libtest +(rustc's built in unit-test and micro-benchmarking framework). If you're +passing arguments to both Cargo and the binary, the ones after `--` go to the +binary, the ones before go to Cargo. For details about libtest's arguments see +the output of `cargo bench -- --help`. + +If the `--package` argument is given, then SPEC is a package ID specification +which indicates which package should be benchmarked. If it is not given, then +the current package is benchmarked. For more information on SPEC and its format, +see the `cargo help pkgid` command. + +All packages in the workspace are benchmarked if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +The `--jobs` argument affects the building of the benchmark executable but does +not affect how many jobs are used when running the benchmarks. + +Compilation can be customized with the `bench` profile in the manifest. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let mut compile_opts = args.compile_options(config, CompileMode::Bench, Some(&ws))?; + + compile_opts.build_config.release = true; + + let ops = TestOptions { + no_run: args.is_present("no-run"), + no_fail_fast: args.is_present("no-fail-fast"), + compile_opts, + }; + + let mut bench_args = vec![]; + bench_args.extend( + args.value_of("BENCHNAME") + .into_iter() + .map(|s| s.to_string()), + ); + bench_args.extend( + args.values_of("args") + .unwrap_or_default() + .map(|s| s.to_string()), + ); + + let err = ops::run_benches(&ws, &ops, &bench_args)?; + match err { + None => Ok(()), + Some(err) => Err(match err.exit.as_ref().and_then(|e| e.code()) { + Some(i) => CliError::new(failure::format_err!("bench failed"), i), + None => CliError::new(err.into(), 101), + }), + } +} diff --git a/src/bin/cargo/commands/build.rs b/src/bin/cargo/commands/build.rs new file mode 100644 index 000000000..3938a8e56 --- /dev/null +++ b/src/bin/cargo/commands/build.rs @@ -0,0 +1,61 @@ +use crate::command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("build") + // subcommand aliases are handled in aliased_command() + // .alias("b") + .about("Compile a local package and all of its dependencies") + .arg_package_spec( + "Package to build (see `cargo help pkgid`)", + "Build all packages in the workspace", + "Exclude packages from the build", + ) + .arg_jobs() + .arg_targets_all( + "Build only this package's library", + "Build only the specified binary", + "Build all binaries", + "Build only the specified example", + "Build all examples", + "Build only the specified test target", + "Build all tests", + "Build only the specified bench target", + "Build all benches", + "Build all targets", + ) + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() 
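+        // `arg_release()`, `arg_features()`, and the other `arg_*` calls in this
+        // chain are shared builder helpers from `src/cargo/util/command_prelude.rs`;
+        // `arg_features()`, for example, adds the `--features`, `--all-features`,
+        // and `--no-default-features` flags to this subcommand.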
+ .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg(opt("out-dir", "Copy final artifacts to this directory").value_name("PATH")) + .arg_manifest_path() + .arg_message_format() + .arg_build_plan() + .after_help( + "\ +All packages in the workspace are built if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +Compilation can be configured via the use of profiles which are configured in +the manifest. The default profile for this command is `dev`, but passing +the --release flag will use the `release` profile instead. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let mut compile_opts = args.compile_options(config, CompileMode::Build, Some(&ws))?; + + compile_opts.export_dir = args.value_of_path("out-dir", config); + if compile_opts.export_dir.is_some() && !config.cli_unstable().unstable_options { + Err(failure::format_err!( + "`--out-dir` flag is unstable, pass `-Z unstable-options` to enable it" + ))?; + }; + ops::compile(&ws, &compile_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/check.rs b/src/bin/cargo/commands/check.rs new file mode 100644 index 000000000..e4f73f260 --- /dev/null +++ b/src/bin/cargo/commands/check.rs @@ -0,0 +1,75 @@ +use crate::command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("check") + // subcommand aliases are handled in aliased_command() + // .alias("c") + .about("Check a local package and all of its dependencies for errors") + .arg_package_spec( + "Package(s) to check", + "Check all packages in the workspace", + "Exclude packages from the check", + ) + .arg_jobs() + .arg_targets_all( + "Check only this package's library", + "Check only the specified binary", + "Check all binaries", + "Check only the specified example", + "Check all examples", + "Check only the specified test target", + "Check all tests", + "Check only the specified bench target", + "Check all benches", + "Check all targets", + ) + .arg_release("Check artifacts in release mode, with optimizations") + .arg(opt("profile", "Profile to build the selected target for").value_name("PROFILE")) + .arg_features() + .arg_target_triple("Check for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +If the `--package` argument is given, then SPEC is a package ID specification +which indicates which package should be built. If it is not given, then the +current package is built. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are checked if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +Compilation can be configured via the use of profiles which are configured in +the manifest. The default profile for this command is `dev`, but passing +the `--release` flag will use the `release` profile instead. + +The `--profile test` flag can be used to check unit tests with the +`#[cfg(test)]` attribute. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let test = match args.value_of("profile") { + Some("test") => true, + None => false, + Some(profile) => { + let err = failure::format_err!( + "unknown profile: `{}`, only `test` is \ + currently supported", + profile + ); + return Err(CliError::new(err, 101)); + } + }; + let mode = CompileMode::Check { test }; + let compile_opts = args.compile_options(config, mode, Some(&ws))?; + + ops::compile(&ws, &compile_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/clean.rs b/src/bin/cargo/commands/clean.rs new file mode 100644 index 000000000..e21fc470f --- /dev/null +++ b/src/bin/cargo/commands/clean.rs @@ -0,0 +1,35 @@ +use crate::command_prelude::*; + +use cargo::ops::{self, CleanOptions}; + +pub fn cli() -> App { + subcommand("clean") + .about("Remove artifacts that cargo has generated in the past") + .arg_package_spec_simple("Package to clean artifacts for") + .arg_manifest_path() + .arg_target_triple("Target triple to clean output for") + .arg_target_dir() + .arg_release("Whether or not to clean release artifacts") + .arg_doc("Whether or not to clean just the documentation directory") + .after_help( + "\ +If the `--package` argument is given, then SPEC is a package ID specification +which indicates which package's artifacts should be cleaned out. If it is not +given, then all packages' artifacts are removed. For more information on SPEC +and its format, see the `cargo help pkgid` command. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let opts = CleanOptions { + config, + spec: values(args, "package"), + target: args.target(), + release: args.is_present("release"), + doc: args.is_present("doc"), + }; + ops::clean(&ws, &opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/doc.rs b/src/bin/cargo/commands/doc.rs new file mode 100644 index 000000000..a802e34b9 --- /dev/null +++ b/src/bin/cargo/commands/doc.rs @@ -0,0 +1,65 @@ +use crate::command_prelude::*; + +use cargo::ops::{self, DocOptions}; + +pub fn cli() -> App { + subcommand("doc") + .about("Build a package's documentation") + .arg(opt( + "open", + "Opens the docs in a browser after the operation", + )) + .arg_package_spec( + "Package to document", + "Document all packages in the workspace", + "Exclude packages from the build", + ) + .arg(opt("no-deps", "Don't build documentation for dependencies")) + .arg(opt("document-private-items", "Document private items")) + .arg_jobs() + .arg_targets_lib_bin( + "Document only this package's library", + "Document only the specified binary", + "Document all binaries", + ) + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +By default the documentation for the local package and all dependencies is +built. The output is all placed in `target/doc` in rustdoc's usual format. + +All packages in the workspace are documented if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +If the `--package` argument is given, then SPEC is a package ID specification +which indicates which package should be documented. If it is not given, then the +current package is documented. 
For more information on SPEC and its format, see +the `cargo help pkgid` command. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let mode = CompileMode::Doc { + deps: !args.is_present("no-deps"), + }; + let mut compile_opts = args.compile_options(config, mode, Some(&ws))?; + compile_opts.local_rustdoc_args = if args.is_present("document-private-items") { + Some(vec!["--document-private-items".to_string()]) + } else { + None + }; + let doc_opts = DocOptions { + open_result: args.is_present("open"), + compile_opts, + }; + ops::doc(&ws, &doc_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/fetch.rs b/src/bin/cargo/commands/fetch.rs new file mode 100644 index 000000000..7d3641320 --- /dev/null +++ b/src/bin/cargo/commands/fetch.rs @@ -0,0 +1,34 @@ +use crate::command_prelude::*; + +use cargo::ops; +use cargo::ops::FetchOptions; + +pub fn cli() -> App { + subcommand("fetch") + .about("Fetch dependencies of a package from the network") + .arg_manifest_path() + .arg_target_triple("Fetch dependencies for the target triple") + .after_help( + "\ +If a lock file is available, this command will ensure that all of the Git +dependencies and/or registries dependencies are downloaded and locally +available. The network is never touched after a `cargo fetch` unless +the lock file changes. + +If the lock file is not available, then this is the equivalent of +`cargo generate-lockfile`. A lock file is generated and dependencies are also +all updated. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + + let opts = FetchOptions { + config, + target: args.target(), + }; + ops::fetch(&ws, &opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/fix.rs b/src/bin/cargo/commands/fix.rs new file mode 100644 index 000000000..bc6542974 --- /dev/null +++ b/src/bin/cargo/commands/fix.rs @@ -0,0 +1,151 @@ +use crate::command_prelude::*; + +use cargo::ops::{self, CompileFilter, FilterRule}; + +pub fn cli() -> App { + subcommand("fix") + .about("Automatically fix lint warnings reported by rustc") + .arg_package_spec( + "Package(s) to fix", + "Fix all packages in the workspace", + "Exclude packages from the fixes", + ) + .arg_jobs() + .arg_targets_all( + "Fix only this package's library", + "Fix only the specified binary", + "Fix all binaries", + "Fix only the specified example", + "Fix all examples", + "Fix only the specified test target", + "Fix all tests", + "Fix only the specified bench target", + "Fix all benches", + "Fix all targets (default)", + ) + .arg_release("Fix artifacts in release mode, with optimizations") + .arg(opt("profile", "Profile to build the selected target for").value_name("PROFILE")) + .arg_features() + .arg_target_triple("Fix for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .arg( + Arg::with_name("broken-code") + .long("broken-code") + .help("Fix code even if it already has compiler errors"), + ) + .arg( + Arg::with_name("edition") + .long("edition") + .help("Fix in preparation for the next edition"), + ) + .arg( + // This is a deprecated argument, we'll want to phase it out + // eventually. 
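+            // `--prepare-for 2018` is kept only for backwards compatibility: it is
+            // hidden from help output and conflicts with the `--edition` flag that
+            // replaced it (see the `conflicts_with`/`hidden` calls below).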
+ Arg::with_name("prepare-for") + .long("prepare-for") + .help("Fix warnings in preparation of an edition upgrade") + .takes_value(true) + .possible_values(&["2018"]) + .conflicts_with("edition") + .hidden(true), + ) + .arg( + Arg::with_name("idioms") + .long("edition-idioms") + .help("Fix warnings to migrate to the idioms of an edition"), + ) + .arg( + Arg::with_name("allow-no-vcs") + .long("allow-no-vcs") + .help("Fix code even if a VCS was not detected"), + ) + .arg( + Arg::with_name("allow-dirty") + .long("allow-dirty") + .help("Fix code even if the working directory is dirty"), + ) + .arg( + Arg::with_name("allow-staged") + .long("allow-staged") + .help("Fix code even if the working directory has staged changes"), + ) + .after_help( + "\ +This Cargo subcommand will automatically take rustc's suggestions from +diagnostics like warnings and apply them to your source code. This is intended +to help automate tasks that rustc itself already knows how to tell you to fix! +The `cargo fix` subcommand is also being developed for the Rust 2018 edition +to provide code the ability to easily opt-in to the new edition without having +to worry about any breakage. + +Executing `cargo fix` will under the hood execute `cargo check`. Any warnings +applicable to your crate will be automatically fixed (if possible) and all +remaining warnings will be displayed when the check process is finished. For +example if you'd like to prepare for the 2018 edition, you can do so by +executing: + + cargo fix --edition + +which behaves the same as `cargo check --all-targets`. Similarly if you'd like +to fix code for different platforms you can do: + + cargo fix --edition --target x86_64-pc-windows-gnu + +or if your crate has optional features: + + cargo fix --edition --no-default-features --features foo + +If you encounter any problems with `cargo fix` or otherwise have any questions +or feature requests please don't hesitate to file an issue at +https://github.com/rust-lang/cargo +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let test = match args.value_of("profile") { + Some("test") => true, + None => false, + Some(profile) => { + let err = failure::format_err!( + "unknown profile: `{}`, only `test` is \ + currently supported", + profile + ); + return Err(CliError::new(err, 101)); + } + }; + let mode = CompileMode::Check { test }; + + // Unlike other commands default `cargo fix` to all targets to fix as much + // code as we can. + let mut opts = args.compile_options(config, mode, Some(&ws))?; + + if let CompileFilter::Default { .. 
} = opts.filter { + opts.filter = CompileFilter::Only { + all_targets: true, + lib: true, + bins: FilterRule::All, + examples: FilterRule::All, + benches: FilterRule::All, + tests: FilterRule::All, + } + } + ops::fix( + &ws, + &mut ops::FixOptions { + edition: args.is_present("edition"), + prepare_for: args.value_of("prepare-for"), + idioms: args.is_present("idioms"), + compile_opts: opts, + allow_dirty: args.is_present("allow-dirty"), + allow_no_vcs: args.is_present("allow-no-vcs"), + allow_staged: args.is_present("allow-staged"), + broken_code: args.is_present("broken-code"), + }, + )?; + Ok(()) +} diff --git a/src/bin/cargo/commands/generate_lockfile.rs b/src/bin/cargo/commands/generate_lockfile.rs new file mode 100644 index 000000000..6e5135a17 --- /dev/null +++ b/src/bin/cargo/commands/generate_lockfile.rs @@ -0,0 +1,15 @@ +use crate::command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("generate-lockfile") + .about("Generate the lockfile for a package") + .arg_manifest_path() +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + ops::generate_lockfile(&ws)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/git_checkout.rs b/src/bin/cargo/commands/git_checkout.rs new file mode 100644 index 000000000..c229307c4 --- /dev/null +++ b/src/bin/cargo/commands/git_checkout.rs @@ -0,0 +1,36 @@ +use crate::command_prelude::*; + +use cargo::core::{GitReference, Source, SourceId}; +use cargo::sources::GitSource; +use cargo::util::ToUrl; + +pub fn cli() -> App { + subcommand("git-checkout") + .about("Checkout a copy of a Git repository") + .arg( + Arg::with_name("url") + .long("url") + .value_name("URL") + .required(true), + ) + .arg( + Arg::with_name("reference") + .long("reference") + .value_name("REF") + .required(true), + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let url = args.value_of("url").unwrap().to_url()?; + let reference = args.value_of("reference").unwrap(); + + let reference = GitReference::Branch(reference.to_string()); + let source_id = SourceId::for_git(&url, reference)?; + + let mut source = GitSource::new(source_id, config)?; + + source.update()?; + + Ok(()) +} diff --git a/src/bin/cargo/commands/init.rs b/src/bin/cargo/commands/init.rs new file mode 100644 index 000000000..8fb765202 --- /dev/null +++ b/src/bin/cargo/commands/init.rs @@ -0,0 +1,20 @@ +use crate::command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("init") + .about("Create a new cargo package in an existing directory") + .arg(Arg::with_name("path").default_value(".")) + .arg(opt("registry", "Registry to use").value_name("REGISTRY")) + .arg_new_opts() +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let opts = args.new_options(config)?; + ops::init(&opts, config)?; + config + .shell() + .status("Created", format!("{} package", opts.kind))?; + Ok(()) +} diff --git a/src/bin/cargo/commands/install.rs b/src/bin/cargo/commands/install.rs new file mode 100644 index 000000000..a0a9fdfe3 --- /dev/null +++ b/src/bin/cargo/commands/install.rs @@ -0,0 +1,134 @@ +use crate::command_prelude::*; + +use cargo::core::{GitReference, SourceId}; +use cargo::ops; +use cargo::util::ToUrl; + +pub fn cli() -> App { + subcommand("install") + .about("Install a Rust binary. 
Default location is $HOME/.cargo/bin")
+        .arg(Arg::with_name("crate").empty_values(false).multiple(true))
+        .arg(
+            opt("version", "Specify a version to install from crates.io")
+                .alias("vers")
+                .value_name("VERSION"),
+        )
+        .arg(opt("git", "Git URL to install the specified crate from").value_name("URL"))
+        .arg(opt("branch", "Branch to use when installing from git").value_name("BRANCH"))
+        .arg(opt("tag", "Tag to use when installing from git").value_name("TAG"))
+        .arg(opt("rev", "Specific commit to use when installing from git").value_name("SHA"))
+        .arg(opt("path", "Filesystem path to local crate to install").value_name("PATH"))
+        .arg(opt(
+            "list",
+            "list all installed packages and their versions",
+        ))
+        .arg_jobs()
+        .arg(opt("force", "Force overwriting existing crates or binaries").short("f"))
+        .arg_features()
+        .arg(opt("debug", "Build in debug mode instead of release mode"))
+        .arg_targets_bins_examples(
+            "Install only the specified binary",
+            "Install all binaries",
+            "Install only the specified example",
+            "Install all examples",
+        )
+        .arg_target_triple("Build for the target triple")
+        .arg(opt("root", "Directory to install packages into").value_name("DIR"))
+        .arg(opt("registry", "Registry to use").value_name("REGISTRY"))
+        .after_help(
+            "\
+This command manages Cargo's local set of installed binary crates. Only packages
+which have [[bin]] targets can be installed, and all binaries are installed into
+the installation root's `bin` folder. The installation root is determined, in
+order of precedence, by `--root`, `$CARGO_INSTALL_ROOT`, the `install.root`
+configuration key, and finally the home directory (which is either
+`$CARGO_HOME` if set or `$HOME/.cargo` by default).
+
+There are multiple sources from which a crate can be installed. The default
+location is crates.io but the `--git` and `--path` flags can change this source.
+If the source contains more than one package (such as crates.io or a git
+repository with multiple crates) the `<crate>` argument is required to indicate
+which crate should be installed.
+
+Crates from crates.io can optionally specify the version they wish to install
+via the `--version` flags, and similarly packages from git repositories can
+optionally specify the branch, tag, or revision that should be installed. If a
+crate has multiple binaries, the `--bin` argument can selectively install only
+one of them, and if you'd rather install examples the `--example` argument can
+be used as well.
+
+By default cargo will refuse to overwrite existing binaries. The `--force` flag
+enables overwriting existing binaries. Thus you can reinstall a crate with
+`cargo install --force <crate>`.
+
+Omitting the <crate> specification entirely will
+install the crate in the current directory. That is, `install` is equivalent to
+the more explicit `install --path .`. This behaviour is deprecated, and no
+longer supported as of the Rust 2018 edition.
+
+If the source is crates.io or `--git` then by default the crate will be built
+in a temporary target directory. To avoid this, the target directory can be
+specified by setting the `CARGO_TARGET_DIR` environment variable to a relative
+path. 
In particular, this can be useful for caching build artifacts on
+continuous integration systems.",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let registry = args.registry(config)?;
+
+    config.reload_rooted_at_cargo_home()?;
+
+    let workspace = args.workspace(config).ok();
+    let mut compile_opts = args.compile_options(config, CompileMode::Build, workspace.as_ref())?;
+
+    compile_opts.build_config.release = !args.is_present("debug");
+
+    let krates = args
+        .values_of("crate")
+        .unwrap_or_default()
+        .collect::<Vec<_>>();
+
+    let mut from_cwd = false;
+
+    let source = if let Some(url) = args.value_of("git") {
+        let url = url.to_url()?;
+        let gitref = if let Some(branch) = args.value_of("branch") {
+            GitReference::Branch(branch.to_string())
+        } else if let Some(tag) = args.value_of("tag") {
+            GitReference::Tag(tag.to_string())
+        } else if let Some(rev) = args.value_of("rev") {
+            GitReference::Rev(rev.to_string())
+        } else {
+            GitReference::Branch("master".to_string())
+        };
+        SourceId::for_git(&url, gitref)?
+    } else if let Some(path) = args.value_of_path("path", config) {
+        SourceId::for_path(&path)?
+    } else if krates.is_empty() {
+        from_cwd = true;
+        SourceId::for_path(config.cwd())?
+    } else if let Some(registry) = registry {
+        SourceId::alt_registry(config, &registry)?
+    } else {
+        SourceId::crates_io(config)?
+    };
+
+    let version = args.value_of("version");
+    let root = args.value_of("root");
+
+    if args.is_present("list") {
+        ops::install_list(root, config)?;
+    } else {
+        ops::install(
+            root,
+            krates,
+            source,
+            from_cwd,
+            version,
+            &compile_opts,
+            args.is_present("force"),
+        )?;
+    }
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/locate_project.rs b/src/bin/cargo/commands/locate_project.rs
new file mode 100644
index 000000000..8fb1cc600
--- /dev/null
+++ b/src/bin/cargo/commands/locate_project.rs
@@ -0,0 +1,34 @@
+use crate::command_prelude::*;
+
+use cargo::print_json;
+use serde::Serialize;
+
+pub fn cli() -> App {
+    subcommand("locate-project")
+        .about("Print a JSON representation of a Cargo.toml file's location")
+        .arg_manifest_path()
+}
+
+#[derive(Serialize)]
+pub struct ProjectLocation<'a> {
+    root: &'a str,
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let root = args.root_manifest(config)?;
+
+    let root = root
+        .to_str()
+        .ok_or_else(|| {
+            failure::format_err!(
+                "your package path contains characters \
+                 not representable in Unicode"
+            )
+        })
+        .map_err(|e| CliError::new(e, 1))?;
+
+    let location = ProjectLocation { root };
+
+    print_json(&location);
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/login.rs b/src/bin/cargo/commands/login.rs
new file mode 100644
index 000000000..5d76e7e12
--- /dev/null
+++ b/src/bin/cargo/commands/login.rs
@@ -0,0 +1,27 @@
+use crate::command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("login")
+        .about(
+            "Save an api token from the registry locally. \
+             If token is not specified, it will be read from stdin.",
+        )
+        .arg(Arg::with_name("token"))
+        .arg(
+            opt("host", "Host to set the token for")
+                .value_name("HOST")
+                .hidden(true),
+        )
+        .arg(opt("registry", "Registry to use").value_name("REGISTRY"))
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    ops::registry_login(
+        config,
+        args.value_of("token").map(String::from),
+        args.value_of("registry").map(String::from),
+    )?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/metadata.rs b/src/bin/cargo/commands/metadata.rs
new file mode 100644
index 000000000..eb2a453bb
--- /dev/null
+++ b/src/bin/cargo/commands/metadata.rs
@@ -0,0 +1,53 @@
+use crate::command_prelude::*;
+
+use cargo::ops::{self, OutputMetadataOptions};
+use cargo::print_json;
+
+pub fn cli() -> App {
+    subcommand("metadata")
+        .about(
+            "Output the resolved dependencies of a package, \
+             the concrete used versions including overrides, \
+             in machine-readable format",
+        )
+        .arg_features()
+        .arg(opt(
+            "no-deps",
+            "Output information only about the root package \
+             and don't fetch dependencies",
+        ))
+        .arg_manifest_path()
+        .arg(
+            opt("format-version", "Format version")
+                .value_name("VERSION")
+                .possible_value("1"),
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let ws = args.workspace(config)?;
+
+    let version = match args.value_of("format-version") {
+        None => {
+            config.shell().warn(
+                "\
+                 please specify `--format-version` flag explicitly \
+                 to avoid compatibility problems",
+            )?;
+            1
+        }
+        Some(version) => version.parse().unwrap(),
+    };
+
+    let options = OutputMetadataOptions {
+        features: values(args, "features"),
+        all_features: args.is_present("all-features"),
+        no_default_features: args.is_present("no-default-features"),
+        no_deps: args.is_present("no-deps"),
+        version,
+    };
+
+    let result = ops::output_metadata(&ws, &options)?;
+    print_json(&result);
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/mod.rs b/src/bin/cargo/commands/mod.rs
new file mode 100644
index 000000000..c62f2aad2
--- /dev/null
+++ b/src/bin/cargo/commands/mod.rs
@@ -0,0 +1,104 @@
+use crate::command_prelude::*;
+
+pub fn builtin() -> Vec<App> {
+    vec![
+        bench::cli(),
+        build::cli(),
+        check::cli(),
+        clean::cli(),
+        doc::cli(),
+        fetch::cli(),
+        fix::cli(),
+        generate_lockfile::cli(),
+        git_checkout::cli(),
+        init::cli(),
+        install::cli(),
+        locate_project::cli(),
+        login::cli(),
+        metadata::cli(),
+        new::cli(),
+        owner::cli(),
+        package::cli(),
+        pkgid::cli(),
+        publish::cli(),
+        read_manifest::cli(),
+        run::cli(),
+        rustc::cli(),
+        rustdoc::cli(),
+        search::cli(),
+        test::cli(),
+        uninstall::cli(),
+        update::cli(),
+        verify_project::cli(),
+        version::cli(),
+        yank::cli(),
+    ]
+}
+
+pub fn builtin_exec(cmd: &str) -> Option<fn(&mut Config, &ArgMatches<'_>) -> CliResult> {
+    let f = match cmd {
+        "bench" => bench::exec,
+        "build" => build::exec,
+        "check" => check::exec,
+        "clean" => clean::exec,
+        "doc" => doc::exec,
+        "fetch" => fetch::exec,
+        "fix" => fix::exec,
+        "generate-lockfile" => generate_lockfile::exec,
+        "git-checkout" => git_checkout::exec,
+        "init" => init::exec,
+        "install" => install::exec,
+        "locate-project" => locate_project::exec,
+        "login" => login::exec,
+        "metadata" => metadata::exec,
+        "new" => new::exec,
+        "owner" => owner::exec,
+        "package" => package::exec,
+        "pkgid" => pkgid::exec,
+        "publish" => publish::exec,
+        "read-manifest" => read_manifest::exec,
+        "run" => run::exec,
+        "rustc" => rustc::exec,
+        "rustdoc" => rustdoc::exec,
+        "search" => search::exec,
+        "test" => test::exec,
+        "uninstall" => uninstall::exec,
+        "update" => update::exec,
+        "verify-project" => verify_project::exec,
+        "version" => version::exec,
+        "yank" => yank::exec,
+        _ => return None,
+    };
+    Some(f)
+}
+
+pub mod bench;
+pub mod build;
+pub mod check;
+pub mod clean;
+pub mod doc;
+pub mod fetch;
+pub mod fix;
+pub mod generate_lockfile;
+pub mod git_checkout;
+pub mod init;
+pub mod install;
+pub mod locate_project;
+pub mod login;
+pub mod metadata;
+pub mod new;
+pub mod owner;
+pub mod package;
+pub mod pkgid;
+pub mod publish;
+pub mod read_manifest;
+pub mod run;
+pub mod rustc;
+pub mod rustdoc;
+pub mod search;
+pub mod test;
+pub mod uninstall;
+pub mod update;
+pub mod verify_project;
+pub mod version;
+pub mod yank;
diff --git a/src/bin/cargo/commands/new.rs b/src/bin/cargo/commands/new.rs
new file mode 100644
index 000000000..517b9085d
--- /dev/null
+++ b/src/bin/cargo/commands/new.rs
@@ -0,0 +1,28 @@
+use crate::command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("new")
+        .about("Create a new cargo package at <path>")
+        .arg(Arg::with_name("path").required(true))
+        .arg(opt("registry", "Registry to use").value_name("REGISTRY"))
+        .arg_new_opts()
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let opts = args.new_options(config)?;
+
+    ops::new(&opts, config)?;
+    let path = args.value_of("path").unwrap();
+    let package_name = if let Some(name) = args.value_of("name") {
+        name
+    } else {
+        path
+    };
+    config.shell().status(
+        "Created",
+        format!("{} `{}` package", opts.kind, package_name),
+    )?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/owner.rs b/src/bin/cargo/commands/owner.rs
new file mode 100644
index 000000000..09a76b96b
--- /dev/null
+++ b/src/bin/cargo/commands/owner.rs
@@ -0,0 +1,50 @@
+use crate::command_prelude::*;
+
+use cargo::ops::{self, OwnersOptions};
+
+pub fn cli() -> App {
+    subcommand("owner")
+        .about("Manage the owners of a crate on the registry")
+        .arg(Arg::with_name("crate"))
+        .arg(multi_opt("add", "LOGIN", "Name of a user or team to invite as an owner").short("a"))
+        .arg(
+            multi_opt(
+                "remove",
+                "LOGIN",
+                "Name of a user or team to remove as an owner",
+            )
+            .short("r"),
+        )
+        .arg(opt("list", "List owners of a crate").short("l"))
+        .arg(opt("index", "Registry index to modify owners for").value_name("INDEX"))
+        .arg(opt("token", "API token to use when authenticating").value_name("TOKEN"))
+        .arg(opt("registry", "Registry to use").value_name("REGISTRY"))
+        .after_help(
+            "\
+This command will modify the owners for a crate on the specified registry (or
+default). Owners of a crate can upload new versions and yank old versions.
+Explicitly named owners can also modify the set of owners, so take care!
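+
+For example, one might invite a (hypothetical) user `octocat` as an owner of
+the crate `foo` and then list its owners with:
+
+    cargo owner --add octocat foo
+    cargo owner --list foo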
+
+See https://doc.rust-lang.org/cargo/reference/publishing.html#cargo-owner
+for detailed documentation and troubleshooting.",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let registry = args.registry(config)?;
+    let opts = OwnersOptions {
+        krate: args.value_of("crate").map(|s| s.to_string()),
+        token: args.value_of("token").map(|s| s.to_string()),
+        index: args.value_of("index").map(|s| s.to_string()),
+        to_add: args
+            .values_of("add")
+            .map(|xs| xs.map(|s| s.to_string()).collect()),
+        to_remove: args
+            .values_of("remove")
+            .map(|xs| xs.map(|s| s.to_string()).collect()),
+        list: args.is_present("list"),
+        registry,
+    };
+    ops::modify_owners(config, &opts)?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/package.rs b/src/bin/cargo/commands/package.rs
new file mode 100644
index 000000000..1964e138c
--- /dev/null
+++ b/src/bin/cargo/commands/package.rs
@@ -0,0 +1,52 @@
+use crate::command_prelude::*;
+
+use cargo::ops::{self, PackageOpts};
+
+pub fn cli() -> App {
+    subcommand("package")
+        .about("Assemble the local package into a distributable tarball")
+        .arg(
+            opt(
+                "list",
+                "Print files included in a package without making one",
+            )
+            .short("l"),
+        )
+        .arg(opt(
+            "no-verify",
+            "Don't verify the contents by building them",
+        ))
+        .arg(opt(
+            "no-metadata",
+            "Ignore warnings about a lack of human-usable metadata",
+        ))
+        .arg(opt(
+            "allow-dirty",
+            "Allow dirty working directories to be packaged",
+        ))
+        .arg_target_triple("Build for the target triple")
+        .arg_target_dir()
+        .arg_features()
+        .arg_manifest_path()
+        .arg_jobs()
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let ws = args.workspace(config)?;
+    ops::package(
+        &ws,
+        &PackageOpts {
+            config,
+            verify: !args.is_present("no-verify"),
+            list: args.is_present("list"),
+            check_metadata: !args.is_present("no-metadata"),
+            allow_dirty: args.is_present("allow-dirty"),
+            target: args.target(),
+            jobs: args.jobs()?,
+            features: args._values_of("features"),
+            all_features: args.is_present("all-features"),
+            no_default_features: args.is_present("no-default-features"),
+        },
+    )?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/pkgid.rs b/src/bin/cargo/commands/pkgid.rs
new file mode 100644
index 000000000..2cf423d37
--- /dev/null
+++ b/src/bin/cargo/commands/pkgid.rs
@@ -0,0 +1,41 @@
+use crate::command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("pkgid")
+        .about("Print a fully qualified package specification")
+        .arg(Arg::with_name("spec"))
+        .arg_package("Argument to get the package ID specifier for")
+        .arg_manifest_path()
+        .after_help(
+            "\
+Given a <spec> argument, print out the fully qualified package ID specifier.
+This command will generate an error if <spec> is ambiguous as to which package
+it refers to in the dependency graph. If no <spec> is given, then the pkgid for
+the local package is printed.
+
+This command requires that a lockfile is available and dependencies have been
+fetched.
+ +Example Package IDs + + pkgid | name | version | url + |-----------------------------|--------|-----------|---------------------| + foo | foo | * | * + foo:1.2.3 | foo | 1.2.3 | * + crates.io/foo | foo | * | *://crates.io/foo + crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo + crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar + http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let spec = args.value_of("spec").or_else(|| args.value_of("package")); + let spec = ops::pkgid(&ws, spec)?; + println!("{}", spec); + Ok(()) +} diff --git a/src/bin/cargo/commands/publish.rs b/src/bin/cargo/commands/publish.rs new file mode 100644 index 000000000..67b4ed2d2 --- /dev/null +++ b/src/bin/cargo/commands/publish.rs @@ -0,0 +1,50 @@ +use crate::command_prelude::*; + +use cargo::ops::{self, PublishOpts}; + +pub fn cli() -> App { + subcommand("publish") + .about("Upload a package to the registry") + .arg_index() + .arg(opt("token", "Token to use when uploading").value_name("TOKEN")) + .arg(opt( + "no-verify", + "Don't verify the contents by building them", + )) + .arg(opt( + "allow-dirty", + "Allow dirty working directories to be packaged", + )) + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_features() + .arg_jobs() + .arg_dry_run("Perform all checks without uploading") + .arg(opt("registry", "Registry to publish to").value_name("REGISTRY")) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let registry = args.registry(config)?; + let ws = args.workspace(config)?; + let index = args.index(config)?; + + ops::publish( + &ws, + &PublishOpts { + config, + token: args.value_of("token").map(|s| s.to_string()), + index, + verify: !args.is_present("no-verify"), + allow_dirty: args.is_present("allow-dirty"), + target: args.target(), + jobs: args.jobs()?, + dry_run: args.is_present("dry-run"), + registry, + features: args._values_of("features"), + all_features: args.is_present("all-features"), + no_default_features: args.is_present("no-default-features"), + }, + )?; + Ok(()) +} diff --git a/src/bin/cargo/commands/read_manifest.rs b/src/bin/cargo/commands/read_manifest.rs new file mode 100644 index 000000000..b88787064 --- /dev/null +++ b/src/bin/cargo/commands/read_manifest.rs @@ -0,0 +1,21 @@ +use crate::command_prelude::*; + +use cargo::print_json; + +pub fn cli() -> App { + subcommand("read-manifest") + .about( + "\ +Print a JSON representation of a Cargo.toml manifest. 
+ +Deprecated, use `cargo metadata --no-deps` instead.\ +", + ) + .arg_manifest_path() +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + print_json(&ws.current()?); + Ok(()) +} diff --git a/src/bin/cargo/commands/run.rs b/src/bin/cargo/commands/run.rs new file mode 100644 index 000000000..3f37fc091 --- /dev/null +++ b/src/bin/cargo/commands/run.rs @@ -0,0 +1,96 @@ +use crate::command_prelude::*; + +use cargo::core::Verbosity; +use cargo::ops::{self, CompileFilter}; + +pub fn cli() -> App { + subcommand("run") + // subcommand aliases are handled in aliased_command() + // .alias("r") + .setting(AppSettings::TrailingVarArg) + .about("Run a binary or example of the local package") + .arg(Arg::with_name("args").multiple(true)) + .arg_targets_bin_example( + "Name of the bin target to run", + "Name of the example target to run", + ) + .arg_package("Package with the target to run") + .arg_jobs() + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +If neither `--bin` nor `--example` are given, then if the package only has one +bin target it will be run. Otherwise `--bin` specifies the bin target to run, +and `--example` specifies the example target to run. At most one of `--bin` or +`--example` can be provided. + +All the arguments following the two dashes (`--`) are passed to the binary to +run. If you're passing arguments to both Cargo and the binary, the ones after +`--` go to the binary, the ones before go to Cargo. +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + + let mut compile_opts = args.compile_options(config, CompileMode::Build, Some(&ws))?; + + if !args.is_present("example") && !args.is_present("bin") { + let default_runs: Vec<_> = compile_opts + .spec + .get_packages(&ws)? + .iter() + .filter_map(|pkg| pkg.manifest().default_run()) + .collect(); + if default_runs.len() == 1 { + compile_opts.filter = CompileFilter::new( + false, + vec![default_runs[0].to_owned()], + false, + vec![], + false, + vec![], + false, + vec![], + false, + false, + ); + } else { + // ops::run will take care of errors if len pkgs != 1. + compile_opts.filter = CompileFilter::Default { + // Force this to false because the code in ops::run is not + // able to pre-check features before compilation starts to + // enforce that only 1 binary is built. + required_features_filterable: false, + }; + } + }; + match ops::run(&ws, &compile_opts, &values(args, "args"))? { + None => Ok(()), + Some(err) => { + // If we never actually spawned the process then that sounds pretty + // bad and we always want to forward that up. 
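+            // `err.exit` is `None` when the process could not be spawned at
+            // all; that case is always surfaced as a hard error below. When
+            // the process did run, Cargo mirrors its exit code, falling back
+            // to 101 if there is no code (e.g. it was killed by a signal).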
+            let exit = match err.exit {
+                Some(exit) => exit,
+                None => return Err(CliError::new(err.into(), 101)),
+            };
+
+            // If `-q` was passed then we suppress extra error information about
+            // a failed process; we assume the process itself printed out enough
+            // information about why it failed, so we don't do so as well.
+            let exit_code = exit.code().unwrap_or(101);
+            let is_quiet = config.shell().verbosity() == Verbosity::Quiet;
+            Err(if is_quiet {
+                CliError::code(exit_code)
+            } else {
+                CliError::new(err.into(), exit_code)
+            })
+        }
+    }
+}
diff --git a/src/bin/cargo/commands/rustc.rs b/src/bin/cargo/commands/rustc.rs
new file mode 100644
index 000000000..b0400b556
--- /dev/null
+++ b/src/bin/cargo/commands/rustc.rs
@@ -0,0 +1,74 @@
+use crate::command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("rustc")
+        .setting(AppSettings::TrailingVarArg)
+        .about("Compile a package and all of its dependencies")
+        .arg(Arg::with_name("args").multiple(true))
+        .arg_package("Package to build")
+        .arg_jobs()
+        .arg_targets_all(
+            "Build only this package's library",
+            "Build only the specified binary",
+            "Build all binaries",
+            "Build only the specified example",
+            "Build all examples",
+            "Build only the specified test target",
+            "Build all tests",
+            "Build only the specified bench target",
+            "Build all benches",
+            "Build all targets",
+        )
+        .arg_release("Build artifacts in release mode, with optimizations")
+        .arg(opt("profile", "Profile to build the selected target for").value_name("PROFILE"))
+        .arg_features()
+        .arg_target_triple("Target triple which compiles will be for")
+        .arg_target_dir()
+        .arg_manifest_path()
+        .arg_message_format()
+        .after_help(
+            "\
+The specified target for the current package (or package specified by SPEC if
+provided) will be compiled along with all of its dependencies. The specified
+<args>... will all be passed to the final compiler invocation, not any of the
+dependencies. Note that the compiler will still unconditionally receive
+arguments such as -L, --extern, and --crate-type, and the specified <args>...
+will simply be added to the compiler invocation.
+
+This command requires that only one target is being compiled. If more than one
+target is available for the current package the filters of --lib, --bin, etc,
+must be used to select which target is compiled. To pass flags to all compiler
+processes spawned by Cargo, use the $RUSTFLAGS environment variable or the
+`build.rustflags` configuration option.
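+
+For example, one might pass an extra codegen flag to the final compiler
+invocation only (leaving dependencies untouched) with something like:
+
+    cargo rustc --lib -- -C overflow-checks=on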
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let mode = match args.value_of("profile") { + Some("dev") | None => CompileMode::Build, + Some("test") => CompileMode::Test, + Some("bench") => CompileMode::Bench, + Some("check") => CompileMode::Check { test: false }, + Some(mode) => { + let err = failure::format_err!( + "unknown profile: `{}`, use dev, + test, or bench", + mode + ); + return Err(CliError::new(err, 101)); + } + }; + let mut compile_opts = args.compile_options_for_single_package(config, mode, Some(&ws))?; + let target_args = values(args, "args"); + compile_opts.target_rustc_args = if target_args.is_empty() { + None + } else { + Some(target_args) + }; + ops::compile(&ws, &compile_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/rustdoc.rs b/src/bin/cargo/commands/rustdoc.rs new file mode 100644 index 000000000..ad2ed4371 --- /dev/null +++ b/src/bin/cargo/commands/rustdoc.rs @@ -0,0 +1,70 @@ +use cargo::ops::{self, DocOptions}; + +use crate::command_prelude::*; + +pub fn cli() -> App { + subcommand("rustdoc") + .setting(AppSettings::TrailingVarArg) + .about("Build a package's documentation, using specified custom flags.") + .arg(Arg::with_name("args").multiple(true)) + .arg(opt( + "open", + "Opens the docs in a browser after the operation", + )) + .arg_package("Package to document") + .arg_jobs() + .arg_targets_all( + "Build only this package's library", + "Build only the specified binary", + "Build all binaries", + "Build only the specified example", + "Build all examples", + "Build only the specified test target", + "Build all tests", + "Build only the specified bench target", + "Build all benches", + "Build all targets", + ) + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +The specified target for the current package (or package specified by SPEC if +provided) will be documented with the specified `...` being passed to the +final rustdoc invocation. Dependencies will not be documented as part of this +command. Note that rustdoc will still unconditionally receive arguments such +as `-L`, `--extern`, and `--crate-type`, and the specified `...` will +simply be added to the rustdoc invocation. + +If the `--package` argument is given, then SPEC is a package ID specification +which indicates which package should be documented. If it is not given, then the +current package is documented. For more information on SPEC and its format, see +the `cargo help pkgid` command. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + let mut compile_opts = args.compile_options_for_single_package( + config, + CompileMode::Doc { deps: false }, + Some(&ws), + )?; + let target_args = values(args, "args"); + compile_opts.target_rustdoc_args = if target_args.is_empty() { + None + } else { + Some(target_args) + }; + let doc_opts = DocOptions { + open_result: args.is_present("open"), + compile_opts, + }; + ops::doc(&ws, &doc_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/search.rs b/src/bin/cargo/commands/search.rs new file mode 100644 index 000000000..f9cf7e25d --- /dev/null +++ b/src/bin/cargo/commands/search.rs @@ -0,0 +1,31 @@ +use crate::command_prelude::*; + +use std::cmp::min; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("search") + .about("Search packages in crates.io") + .arg(Arg::with_name("query").multiple(true)) + .arg_index() + .arg( + opt( + "limit", + "Limit the number of results (default: 10, max: 100)", + ) + .value_name("LIMIT"), + ) + .arg(opt("registry", "Registry to use").value_name("REGISTRY")) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let registry = args.registry(config)?; + let index = args.index(config)?; + let limit = args.value_of_u32("limit")?; + let limit = min(100, limit.unwrap_or(10)); + let query: Vec<&str> = args.values_of("query").unwrap_or_default().collect(); + let query: String = query.join("+"); + ops::search(&query, config, index, limit, registry)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/test.rs b/src/bin/cargo/commands/test.rs new file mode 100644 index 000000000..dca409845 --- /dev/null +++ b/src/bin/cargo/commands/test.rs @@ -0,0 +1,151 @@ +use cargo::ops::{self, CompileFilter}; + +use crate::command_prelude::*; + +pub fn cli() -> App { + subcommand("test") + // Subcommand aliases are handled in `aliased_command()`. + // .alias("t") + .setting(AppSettings::TrailingVarArg) + .about("Execute all unit and integration tests and build examples of a local package") + .arg( + Arg::with_name("TESTNAME") + .help("If specified, only run tests containing this string in their names"), + ) + .arg( + Arg::with_name("args") + .help("Arguments for the test binary") + .multiple(true) + .last(true), + ) + .arg_targets_all( + "Test only this package's library unit tests", + "Test only the specified binary", + "Test all binaries", + "Test only the specified example", + "Test all examples", + "Test only the specified test target", + "Test all tests", + "Test only the specified bench target", + "Test all benches", + "Test all targets", + ) + .arg(opt("doc", "Test only this library's documentation")) + .arg(opt("no-run", "Compile, but don't run tests")) + .arg(opt("no-fail-fast", "Run all tests regardless of failure")) + .arg_package_spec( + "Package to run tests for", + "Test all packages in the workspace", + "Exclude packages from the test", + ) + .arg_jobs() + .arg_release("Build artifacts in release mode, with optimizations") + .arg_features() + .arg_target_triple("Build for the target triple") + .arg_target_dir() + .arg_manifest_path() + .arg_message_format() + .after_help( + "\ +The test filtering argument TESTNAME and all the arguments following the +two dashes (`--`) are passed to the test binaries and thus to libtest +(rustc's built in unit-test and micro-benchmarking framework). If you're +passing arguments to both Cargo and the binary, the ones after `--` go to the +binary, the ones before go to Cargo. 
For details about libtest's arguments see +the output of `cargo test -- --help`. As an example, this will run all +tests with `foo` in their name on 3 threads in parallel: + + cargo test foo -- --test-threads 3 + +If the `--package` argument is given, then SPEC is a package ID specification +which indicates which package should be tested. If it is not given, then the +current package is tested. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are tested if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +The `--jobs` argument affects the building of the test executable but does +not affect how many jobs are used when running the tests. The default value +for the `--jobs` argument is the number of CPUs. If you want to control the +number of simultaneous running test cases, pass the `--test-threads` option +to the test binaries: + + cargo test -- --test-threads=1 + +Compilation can be configured via the `test` profile in the manifest. + +By default the rust test harness hides output from test execution to +keep results readable. Test output can be recovered (e.g., for debugging) +by passing `--nocapture` to the test binaries: + + cargo test -- --nocapture + +To get the list of all options available for the test binaries use this: + + cargo test -- --help +", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + + let mut compile_opts = args.compile_options(config, CompileMode::Test, Some(&ws))?; + + let no_run = args.is_present("no-run"); + let doc = args.is_present("doc"); + if doc { + if let CompileFilter::Only { .. } = compile_opts.filter { + return Err(CliError::new( + failure::format_err!("Can't mix --doc with other target selecting options"), + 101, + )); + } + if no_run { + return Err(CliError::new( + failure::format_err!("Can't skip running doc tests with --no-run"), + 101, + )); + } + compile_opts.build_config.mode = CompileMode::Doctest; + compile_opts.filter = ops::CompileFilter::new( + true, + Vec::new(), + false, + Vec::new(), + false, + Vec::new(), + false, + Vec::new(), + false, + false, + ); + } + + let ops = ops::TestOptions { + no_run, + no_fail_fast: args.is_present("no-fail-fast"), + compile_opts, + }; + + // `TESTNAME` is actually an argument of the test binary, but it's + // important, so we explicitly mention it and reconfigure. 
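+    // The filter goes first so that libtest receives it before any flags
+    // forwarded from `--`; e.g. `cargo test foo -- --ignored` invokes the
+    // test binary as `<binary> foo --ignored`.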
+    let mut test_args = vec![];
+    test_args.extend(args.value_of("TESTNAME").into_iter().map(|s| s.to_string()));
+    test_args.extend(
+        args.values_of("args")
+            .unwrap_or_default()
+            .map(|s| s.to_string()),
+    );
+
+    let err = ops::run_tests(&ws, &ops, &test_args)?;
+    match err {
+        None => Ok(()),
+        Some(err) => Err(match err.exit.as_ref().and_then(|e| e.code()) {
+            Some(i) => CliError::new(failure::format_err!("{}", err.hint(&ws)), i),
+            None => CliError::new(err.into(), 101),
+        }),
+    }
+}
diff --git a/src/bin/cargo/commands/uninstall.rs b/src/bin/cargo/commands/uninstall.rs
new file mode 100644
index 000000000..90d21d16b
--- /dev/null
+++ b/src/bin/cargo/commands/uninstall.rs
@@ -0,0 +1,30 @@
+use crate::command_prelude::*;
+
+use cargo::ops;
+
+pub fn cli() -> App {
+    subcommand("uninstall")
+        .about("Remove a Rust binary")
+        .arg(Arg::with_name("spec").multiple(true))
+        .arg_package_spec_simple("Package to uninstall")
+        .arg(multi_opt("bin", "NAME", "Only uninstall the binary NAME"))
+        .arg(opt("root", "Directory to uninstall packages from").value_name("DIR"))
+        .after_help(
+            "\
+The argument SPEC is a package ID specification (see `cargo help pkgid`) to
+specify which crate should be uninstalled. By default all binaries are
+uninstalled for a crate but the `--bin` and `--example` flags can be used to
+only uninstall particular binaries.
+",
+        )
+}
+
+pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
+    let root = args.value_of("root");
+    let specs = args
+        .values_of("spec")
+        .unwrap_or_else(|| args.values_of("package").unwrap_or_default())
+        .collect();
+    ops::uninstall(root, specs, &values(args, "bin"), config)?;
+    Ok(())
+}
diff --git a/src/bin/cargo/commands/update.rs b/src/bin/cargo/commands/update.rs
new file mode 100644
index 000000000..27ba7dda0
--- /dev/null
+++ b/src/bin/cargo/commands/update.rs
@@ -0,0 +1,53 @@
+use crate::command_prelude::*;
+
+use cargo::ops::{self, UpdateOptions};
+
+pub fn cli() -> App {
+    subcommand("update")
+        .about("Update dependencies as recorded in the local lock file")
+        .arg_package_spec_simple("Package to update")
+        .arg(opt(
+            "aggressive",
+            "Force updating all dependencies of <name> as well",
+        ))
+        .arg_dry_run("Don't actually write the lockfile")
+        .arg(opt("precise", "Update a single dependency to exactly PRECISE").value_name("PRECISE"))
+        .arg_manifest_path()
+        .after_help(
+            "\
+This command requires that a `Cargo.lock` already exists as generated by
+`cargo build` or related commands.
+
+If SPEC is given, then a conservative update of the lockfile will be
+performed. This means that only the dependency specified by SPEC will be
+updated. Its transitive dependencies will be updated only if SPEC cannot be
+updated without updating dependencies. All other dependencies will remain
+locked at their currently recorded versions.
+
+If PRECISE is specified, then `--aggressive` must not also be specified. The
+argument PRECISE is a string representing a precise revision that the package
+being updated should be updated to. For example, if the package comes from a git
+repository, then PRECISE would be the exact revision that the repository should
+be updated to.
+
+If SPEC is not given, then all dependencies will be re-resolved and
+updated.
+
+For more information about package ID specifications, see `cargo help pkgid`.
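+
+For example, to conservatively update only a (hypothetical) dependency `foo`
+to an exact version:
+
+    cargo update -p foo --precise 1.2.3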
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let ws = args.workspace(config)?; + + let update_opts = UpdateOptions { + aggressive: args.is_present("aggressive"), + precise: args.value_of("precise"), + to_update: values(args, "package"), + dry_run: args.is_present("dry-run"), + config, + }; + ops::update_lockfile(&ws, &update_opts)?; + Ok(()) +} diff --git a/src/bin/cargo/commands/verify_project.rs b/src/bin/cargo/commands/verify_project.rs new file mode 100644 index 000000000..0b17e4e50 --- /dev/null +++ b/src/bin/cargo/commands/verify_project.rs @@ -0,0 +1,30 @@ +use crate::command_prelude::*; + +use std::collections::HashMap; +use std::process; + +use cargo::print_json; + +pub fn cli() -> App { + subcommand("verify-project") + .about("Check correctness of crate manifest") + .arg_manifest_path() +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + fn fail(reason: &str, value: &str) -> ! { + let mut h = HashMap::new(); + h.insert(reason.to_string(), value.to_string()); + print_json(&h); + process::exit(1) + } + + if let Err(e) = args.workspace(config) { + fail("invalid", &e.to_string()) + } + + let mut h = HashMap::new(); + h.insert("success".to_string(), "true".to_string()); + print_json(&h); + Ok(()) +} diff --git a/src/bin/cargo/commands/version.rs b/src/bin/cargo/commands/version.rs new file mode 100644 index 000000000..d546ff705 --- /dev/null +++ b/src/bin/cargo/commands/version.rs @@ -0,0 +1,14 @@ +use crate::command_prelude::*; + +use crate::cli; + +pub fn cli() -> App { + subcommand("version").about("Show version information") +} + +pub fn exec(_config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let verbose = args.occurrences_of("verbose") > 0; + let version = cli::get_version_string(verbose); + print!("{}", version); + Ok(()) +} diff --git a/src/bin/cargo/commands/yank.rs b/src/bin/cargo/commands/yank.rs new file mode 100644 index 000000000..a99714816 --- /dev/null +++ b/src/bin/cargo/commands/yank.rs @@ -0,0 +1,43 @@ +use crate::command_prelude::*; + +use cargo::ops; + +pub fn cli() -> App { + subcommand("yank") + .about("Remove a pushed crate from the index") + .arg(Arg::with_name("crate")) + .arg(opt("vers", "The version to yank or un-yank").value_name("VERSION")) + .arg(opt( + "undo", + "Undo a yank, putting a version back into the index", + )) + .arg(opt("index", "Registry index to yank from").value_name("INDEX")) + .arg(opt("token", "API token to use when authenticating").value_name("TOKEN")) + .arg(opt("registry", "Registry to use").value_name("REGISTRY")) + .after_help( + "\ +The yank command removes a previously pushed crate's version from the server's +index. This command does not delete any data, and the crate will still be +available for download via the registry's download link. + +Note that existing crates locked to a yanked version will still be able to +download the yanked version to use it. Cargo will, however, not allow any new +crates to be locked to any yanked version. 
+", + ) +} + +pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult { + let registry = args.registry(config)?; + + ops::yank( + config, + args.value_of("crate").map(|s| s.to_string()), + args.value_of("vers").map(|s| s.to_string()), + args.value_of("token").map(|s| s.to_string()), + args.value_of("index").map(|s| s.to_string()), + args.is_present("undo"), + registry, + )?; + Ok(()) +} diff --git a/src/bin/cargo/main.rs b/src/bin/cargo/main.rs new file mode 100644 index 000000000..d0a18293f --- /dev/null +++ b/src/bin/cargo/main.rs @@ -0,0 +1,215 @@ +#![warn(rust_2018_idioms)] // while we're getting used to 2018 +#![allow(clippy::too_many_arguments)] // large project +#![allow(clippy::redundant_closure)] // there's a false positive + +use std::collections::BTreeSet; +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; + +use cargo::core::shell::Shell; +use cargo::util::{self, command_prelude, lev_distance, CargoResult, CliResult, Config}; +use cargo::util::{CliError, ProcessError}; + +mod cli; +mod commands; + +use crate::command_prelude::*; + +fn main() { + #[cfg(feature = "pretty-env-logger")] + pretty_env_logger::init(); + #[cfg(not(feature = "pretty-env-logger"))] + env_logger::init(); + cargo::core::maybe_allow_nightly_features(); + + let mut config = match Config::default() { + Ok(cfg) => cfg, + Err(e) => { + let mut shell = Shell::new(); + cargo::exit_with_error(e.into(), &mut shell) + } + }; + + let result = match cargo::ops::fix_maybe_exec_rustc() { + Ok(true) => Ok(()), + Ok(false) => { + init_git_transports(&config); + let _token = cargo::util::job::setup(); + cli::main(&mut config) + } + Err(e) => Err(CliError::from(e)), + }; + + match result { + Err(e) => cargo::exit_with_error(e, &mut *config.shell()), + Ok(()) => {} + } +} + +fn aliased_command(config: &Config, command: &str) -> CargoResult>> { + let alias_name = format!("alias.{}", command); + let user_alias = match config.get_string(&alias_name) { + Ok(Some(record)) => Some( + record + .val + .split_whitespace() + .map(|s| s.to_string()) + .collect(), + ), + Ok(None) => None, + Err(_) => config + .get_list(&alias_name)? 
+            .map(|record| record.val.iter().map(|s| s.0.to_string()).collect()),
+    };
+    let result = user_alias.or_else(|| match command {
+        "b" => Some(vec!["build".to_string()]),
+        "c" => Some(vec!["check".to_string()]),
+        "r" => Some(vec!["run".to_string()]),
+        "t" => Some(vec!["test".to_string()]),
+        _ => None,
+    });
+    Ok(result)
+}
+
+/// List all runnable commands
+fn list_commands(config: &Config) -> BTreeSet<CommandInfo> {
+    let prefix = "cargo-";
+    let suffix = env::consts::EXE_SUFFIX;
+    let mut commands = BTreeSet::new();
+    for dir in search_directories(config) {
+        let entries = match fs::read_dir(dir) {
+            Ok(entries) => entries,
+            _ => continue,
+        };
+        for entry in entries.filter_map(|e| e.ok()) {
+            let path = entry.path();
+            let filename = match path.file_name().and_then(|s| s.to_str()) {
+                Some(filename) => filename,
+                _ => continue,
+            };
+            if !filename.starts_with(prefix) || !filename.ends_with(suffix) {
+                continue;
+            }
+            if is_executable(entry.path()) {
+                let end = filename.len() - suffix.len();
+                commands.insert(CommandInfo::External {
+                    name: filename[prefix.len()..end].to_string(),
+                    path: path.clone(),
+                });
+            }
+        }
+    }
+
+    for cmd in commands::builtin() {
+        commands.insert(CommandInfo::BuiltIn {
+            name: cmd.get_name().to_string(),
+            about: cmd.p.meta.about.map(|s| s.to_string()),
+        });
+    }
+
+    commands
+}
+
+fn find_closest(config: &Config, cmd: &str) -> Option<String> {
+    let cmds = list_commands(config);
+    // Only consider candidates with a lev_distance of 3 or less so we don't
+    // suggest out-of-the-blue options.
+    cmds.into_iter()
+        .map(|c| c.name())
+        .map(|c| (lev_distance(&c, cmd), c))
+        .filter(|&(d, _)| d < 4)
+        .min_by_key(|a| a.0)
+        .map(|slot| slot.1)
+}
+
+fn execute_external_subcommand(config: &Config, cmd: &str, args: &[&str]) -> CliResult {
+    let command_exe = format!("cargo-{}{}", cmd, env::consts::EXE_SUFFIX);
+    let path = search_directories(config)
+        .iter()
+        .map(|dir| dir.join(&command_exe))
+        .find(|file| is_executable(file));
+    let command = match path {
+        Some(command) => command,
+        None => {
+            let err = match find_closest(config, cmd) {
+                Some(closest) => failure::format_err!(
+                    "no such subcommand: `{}`\n\n\tDid you mean `{}`?\n",
+                    cmd,
+                    closest
+                ),
+                None => failure::format_err!("no such subcommand: `{}`", cmd),
+            };
+            return Err(CliError::new(err, 101));
+        }
+    };
+
+    let cargo_exe = config.cargo_exe()?;
+    let err = match util::process(&command)
+        .env(cargo::CARGO_ENV, cargo_exe)
+        .args(args)
+        .exec_replace()
+    {
+        Ok(()) => return Ok(()),
+        Err(e) => e,
+    };
+
+    if let Some(perr) = err.downcast_ref::<ProcessError>() {
+        if let Some(code) = perr.exit.as_ref().and_then(|c| c.code()) {
+            return Err(CliError::code(code));
+        }
+    }
+    Err(CliError::new(err, 101))
+}
+
+#[cfg(unix)]
+fn is_executable<P: AsRef<Path>>(path: P) -> bool {
+    use std::os::unix::prelude::*;
+    fs::metadata(path)
+        .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111 != 0)
+        .unwrap_or(false)
+}
+#[cfg(windows)]
+fn is_executable<P: AsRef<Path>>(path: P) -> bool {
+    fs::metadata(path)
+        .map(|metadata| metadata.is_file())
+        .unwrap_or(false)
+}
+
+fn search_directories(config: &Config) -> Vec<PathBuf> {
+    let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")];
+    if let Some(val) = env::var_os("PATH") {
+        dirs.extend(env::split_paths(&val));
+    }
+    dirs
+}
+
+fn init_git_transports(config: &Config) {
+    // Only use a custom transport if any HTTP options are specified,
+    // such as proxies or custom certificate authorities. The custom
+    // transport, however, is not as well battle-tested.
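+    //
+    // As an illustration (hypothetical values), a `.cargo/config` entry
+    // such as:
+    //
+    //     [http]
+    //     proxy = "proxy.example.com:3128"
+    //
+    // makes `needs_custom_http_transport` below return true.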
+
+    match cargo::ops::needs_custom_http_transport(config) {
+        Ok(true) => {}
+        _ => return,
+    }
+
+    let handle = match cargo::ops::http_handle(config) {
+        Ok(handle) => handle,
+        Err(..) => return,
+    };
+
+    // The unsafety of the registration function derives from two aspects:
+    //
+    // 1. This call must be synchronized with all other registration calls as
+    //    well as construction of new transports.
+    // 2. The argument is leaked.
+    //
+    // We're clear on point (1) because this is only called at the start of this
+    // binary (we know what the state of the world looks like) and we're mostly
+    // clear on point (2) because we'd only free it after everything is done
+    // anyway.
+    unsafe {
+        git2_curl::register(handle);
+    }
+}
diff --git a/src/cargo/core/compiler/build_config.rs b/src/cargo/core/compiler/build_config.rs
new file mode 100644
index 000000000..dc5e60680
--- /dev/null
+++ b/src/cargo/core/compiler/build_config.rs
@@ -0,0 +1,213 @@
+use std::cell::RefCell;
+use std::path::Path;
+
+use serde::ser;
+
+use crate::util::{CargoResult, CargoResultExt, Config, RustfixDiagnosticServer};
+
+/// Configuration information for a rustc build.
+#[derive(Debug)]
+pub struct BuildConfig {
+    /// The target arch triple.
+    /// Default: host arch.
+    pub requested_target: Option<String>,
+    /// Number of rustc jobs to run in parallel.
+    pub jobs: u32,
+    /// `true` if we are building for release.
+    pub release: bool,
+    /// The mode we are compiling in.
+    pub mode: CompileMode,
+    /// `true` to print stdout in JSON format (for machine reading).
+    pub message_format: MessageFormat,
+    /// Force Cargo to do a full rebuild and treat each target as changed.
+    pub force_rebuild: bool,
+    /// Output a build plan to stdout instead of actually compiling.
+    pub build_plan: bool,
+    /// Use Cargo itself as the wrapper around rustc, only used for `cargo fix`.
+    pub cargo_as_rustc_wrapper: bool,
+    /// Extra env vars to inject into rustc commands.
+    pub extra_rustc_env: Vec<(String, String)>,
+    /// Extra args to inject into rustc commands.
+    pub extra_rustc_args: Vec<String>,
+    pub rustfix_diagnostic_server: RefCell<Option<RustfixDiagnosticServer>>,
+}
+
+impl BuildConfig {
+    /// Parses all config files to learn about build configuration. Currently
+    /// configured options are:
+    ///
+    /// * `build.jobs`
+    /// * `build.target`
+    /// * `target.$target.ar`
+    /// * `target.$target.linker`
+    /// * `target.$target.libfoo.metadata`
+    pub fn new(
+        config: &Config,
+        jobs: Option<u32>,
+        requested_target: &Option<String>,
+        mode: CompileMode,
+    ) -> CargoResult<BuildConfig> {
+        let requested_target = match requested_target {
+            &Some(ref target) if target.ends_with(".json") => {
+                let path = Path::new(target).canonicalize().chain_err(|| {
+                    failure::format_err!("Target path {:?} is not a valid file", target)
+                })?;
+                Some(
+                    path.into_os_string()
+                        .into_string()
+                        .map_err(|_| failure::format_err!("Target path is not valid unicode"))?,
+                )
+            }
+            other => other.clone(),
+        };
+        if let Some(ref s) = requested_target {
+            if s.trim().is_empty() {
+                failure::bail!("target was empty")
+            }
+        }
+        let cfg_target = config.get_string("build.target")?.map(|s| s.val);
+        let target = requested_target.or(cfg_target);
+
+        if jobs == Some(0) {
+            failure::bail!("jobs must be at least 1")
+        }
+        if jobs.is_some() && config.jobserver_from_env().is_some() {
+            config.shell().warn(
+                "a `-j` argument was passed to Cargo but Cargo is \
+                 also configured with an external jobserver in \
+                 its environment, ignoring the `-j` parameter",
+            )?;
+        }
+        let cfg_jobs: Option<u32> = config.get("build.jobs")?;
+        let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
+        Ok(BuildConfig {
+            requested_target: target,
+            jobs,
+            release: false,
+            mode,
+            message_format: MessageFormat::Human,
+            force_rebuild: false,
+            build_plan: false,
+            cargo_as_rustc_wrapper: false,
+            extra_rustc_env: Vec::new(),
+            extra_rustc_args: Vec::new(),
+            rustfix_diagnostic_server: RefCell::new(None),
+        })
+    }
+
+    pub fn json_messages(&self) -> bool {
+        self.message_format == MessageFormat::Json
+    }
+
+    pub fn test(&self) -> bool {
+        self.mode == CompileMode::Test || self.mode == CompileMode::Bench
+    }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum MessageFormat {
+    Human,
+    Json,
+    Short,
+}
+
+/// The general "mode" for what to do.
+/// This is used for two purposes. The commands themselves pass this in to
+/// `compile_ws` to tell it the general execution strategy. This influences
+/// the default targets selected. The other use is in the `Unit` struct
+/// to indicate what is being done with a specific target.
+#[derive(Clone, Copy, PartialEq, Debug, Eq, Hash, PartialOrd, Ord)]
+pub enum CompileMode {
+    /// A target being built for a test.
+    Test,
+    /// Building a target with `rustc` (lib or bin).
+    Build,
+    /// Building a target with `rustc` to emit `rmeta` metadata only. If
+    /// `test` is true, then it is also compiled with `--test` to check it like
+    /// a test.
+    Check { test: bool },
+    /// Used to indicate benchmarks should be built. This is not used in
+    /// `Target`, because it is essentially the same as `Test` (indicating
+    /// `--test` should be passed to rustc) and by using `Test` instead it
+    /// allows some de-duping of Units to occur.
+    Bench,
+    /// A target that will be documented with `rustdoc`.
+    /// If `deps` is true, then it will also document all dependencies.
+    Doc { deps: bool },
+    /// A target that will be tested with `rustdoc`.
+    Doctest,
+    /// A marker for Units that represent the execution of a `build.rs` script.
+    RunCustomBuild,
+}
+
+impl ser::Serialize for CompileMode {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        use self::CompileMode::*;
+        match *self {
+            Test => "test".serialize(s),
+            Build => "build".serialize(s),
+            Check { .. } => "check".serialize(s),
+            Bench => "bench".serialize(s),
+            Doc { .. } => "doc".serialize(s),
+            Doctest => "doctest".serialize(s),
+            RunCustomBuild => "run-custom-build".serialize(s),
+        }
+    }
+}
+
+impl CompileMode {
+    /// Returns `true` if the unit is being checked.
+    pub fn is_check(self) -> bool {
+        match self {
+            CompileMode::Check { .. } => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this is a doc or doc test. Be careful using this.
+    /// Although both run rustdoc, the dependencies for those two modes are
+    /// very different.
+    pub fn is_doc(self) -> bool {
+        match self {
+            CompileMode::Doc { .. } | CompileMode::Doctest => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this is any type of test (test, benchmark, doc test, or
+    /// check test).
+    pub fn is_any_test(self) -> bool {
+        match self {
+            CompileMode::Test
+            | CompileMode::Bench
+            | CompileMode::Check { test: true }
+            | CompileMode::Doctest => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this is the *execution* of a `build.rs` script.
+    pub fn is_run_custom_build(self) -> bool {
+        self == CompileMode::RunCustomBuild
+    }
+
+    /// List of all modes (currently used by `cargo clean -p` for computing
+    /// all possible outputs).
+    pub fn all_modes() -> &'static [CompileMode] {
+        static ALL: [CompileMode; 9] = [
+            CompileMode::Test,
+            CompileMode::Build,
+            CompileMode::Check { test: true },
+            CompileMode::Check { test: false },
+            CompileMode::Bench,
+            CompileMode::Doc { deps: true },
+            CompileMode::Doc { deps: false },
+            CompileMode::Doctest,
+            CompileMode::RunCustomBuild,
+        ];
+        &ALL
+    }
+}
diff --git a/src/cargo/core/compiler/build_context/mod.rs b/src/cargo/core/compiler/build_context/mod.rs
new file mode 100644
index 000000000..789acc8a3
--- /dev/null
+++ b/src/cargo/core/compiler/build_context/mod.rs
@@ -0,0 +1,406 @@
+use std::collections::HashMap;
+use std::env;
+use std::path::{Path, PathBuf};
+use std::str;
+
+use log::debug;
+
+use crate::core::profiles::Profiles;
+use crate::core::{Dependency, Workspace};
+use crate::core::{PackageId, PackageSet, Resolve};
+use crate::util::errors::CargoResult;
+use crate::util::{profile, Cfg, CfgExpr, Config, Rustc};
+
+use super::{BuildConfig, BuildOutput, Kind, Unit};
+
+mod target_info;
+pub use self::target_info::{FileFlavor, TargetInfo};
+
+/// The build context, containing all information about a build task.
+pub struct BuildContext<'a, 'cfg: 'a> {
+    /// The workspace the build is for.
+    pub ws: &'a Workspace<'cfg>,
+    /// The cargo configuration.
+    pub config: &'cfg Config,
+    /// The dependency graph for our build.
+    pub resolve: &'a Resolve,
+    pub profiles: &'a Profiles,
+    pub build_config: &'a BuildConfig,
+    /// Extra compiler args for either `rustc` or `rustdoc`.
+    pub extra_compiler_args: HashMap<Unit<'a>, Vec<String>>,
+    pub packages: &'a PackageSet<'cfg>,
+
+    /// Information about the compiler.
+    pub rustc: Rustc,
+    /// Build information for the host arch.
+    pub host_config: TargetConfig,
+    /// Build information for the target.
+    pub target_config: TargetConfig,
+    pub target_info: TargetInfo,
+    pub host_info: TargetInfo,
+}
+
+impl<'a, 'cfg> BuildContext<'a, 'cfg> {
+    pub fn new(
+        ws: &'a Workspace<'cfg>,
+        resolve: &'a Resolve,
+        packages: &'a PackageSet<'cfg>,
+        config: &'cfg Config,
+        build_config: &'a BuildConfig,
+        profiles: &'a Profiles,
+        extra_compiler_args: HashMap<Unit<'a>, Vec<String>>,
+    ) -> CargoResult<BuildContext<'a, 'cfg>> {
+        let rustc = config.rustc(Some(ws))?;
+        let host_config = TargetConfig::new(config, &rustc.host)?;
+        let target_config = match build_config.requested_target.as_ref() {
+            Some(triple) => TargetConfig::new(config, triple)?,
+            None => host_config.clone(),
+        };
+        let (host_info, target_info) = {
+            let _p = profile::start("BuildContext::probe_target_info");
+            debug!("probe_target_info");
+            let host_info =
+                TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Host)?;
+            let target_info =
+                TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Target)?;
+            (host_info, target_info)
+        };
+
+        Ok(BuildContext {
+            ws,
+            resolve,
+            packages,
+            config,
+            rustc,
+            target_config,
+            target_info,
+            host_config,
+            host_info,
+            build_config,
+            profiles,
+            extra_compiler_args,
+        })
+    }
+
+    pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult<String> {
+        self.resolve
+            .extern_crate_name(unit.pkg.package_id(), dep.pkg.package_id(), dep.target)
+    }
+
+    /// Whether a dependency should be compiled for the host or target platform,
+    /// specified by `Kind`.
+    pub fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
+        // If this dependency is only available for certain platforms,
+        // make sure we're only enabling it for that platform.
+        let platform = match dep.platform() {
+            Some(p) => p,
+            None => return true,
+        };
+        let (name, info) = match kind {
+            Kind::Host => (self.host_triple(), &self.host_info),
+            Kind::Target => (self.target_triple(), &self.target_info),
+        };
+        platform.matches(name, info.cfg())
+    }
+
+    /// Gets the user-specified linker for a particular host or target.
+    pub fn linker(&self, kind: Kind) -> Option<&Path> {
+        self.target_config(kind).linker.as_ref().map(|s| s.as_ref())
+    }
+
+    /// Gets the user-specified `ar` program for a particular host or target.
+    pub fn ar(&self, kind: Kind) -> Option<&Path> {
+        self.target_config(kind).ar.as_ref().map(|s| s.as_ref())
+    }
+
+    /// Gets the list of `cfg`s printed out from the compiler for the specified kind.
+    pub fn cfg(&self, kind: Kind) -> &[Cfg] {
+        let info = match kind {
+            Kind::Host => &self.host_info,
+            Kind::Target => &self.target_info,
+        };
+        info.cfg().unwrap_or(&[])
+    }
+
+    /// Gets the host architecture triple.
+    ///
+    /// For example, x86_64-unknown-linux-gnu would be:
+    /// - machine: x86_64,
+    /// - hardware-platform: unknown,
+    /// - operating system: linux-gnu.
+    pub fn host_triple(&self) -> &str {
+        &self.rustc.host
+    }
+
+    pub fn target_triple(&self) -> &str {
+        self.build_config
+            .requested_target
+            .as_ref()
+            .map(|s| s.as_str())
+            .unwrap_or_else(|| self.host_triple())
+    }
+
+    /// Gets the target configuration for a particular host or target.
+    fn target_config(&self, kind: Kind) -> &TargetConfig {
+        match kind {
+            Kind::Host => &self.host_config,
+            Kind::Target => &self.target_config,
+        }
+    }
+
+    /// Gets the number of jobs specified for this build.
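+    ///
+    /// The value is already resolved here: `BuildConfig::new` falls back to
+    /// the `build.jobs` config value, and then to the number of logical
+    /// CPUs, when no `-j` flag was given.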
+    pub fn jobs(&self) -> u32 {
+        self.build_config.jobs
+    }
+
+    pub fn rustflags_args(&self, unit: &Unit<'_>) -> CargoResult<Vec<String>> {
+        env_args(
+            self.config,
+            &self.build_config.requested_target,
+            self.host_triple(),
+            self.info(unit.kind).cfg(),
+            unit.kind,
+            "RUSTFLAGS",
+        )
+    }
+
+    pub fn rustdocflags_args(&self, unit: &Unit<'_>) -> CargoResult<Vec<String>> {
+        env_args(
+            self.config,
+            &self.build_config.requested_target,
+            self.host_triple(),
+            self.info(unit.kind).cfg(),
+            unit.kind,
+            "RUSTDOCFLAGS",
+        )
+    }
+
+    pub fn show_warnings(&self, pkg: PackageId) -> bool {
+        pkg.source_id().is_path() || self.config.extra_verbose()
+    }
+
+    fn info(&self, kind: Kind) -> &TargetInfo {
+        match kind {
+            Kind::Host => &self.host_info,
+            Kind::Target => &self.target_info,
+        }
+    }
+
+    pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec<String>> {
+        self.extra_compiler_args.get(unit)
+    }
+}
+
+/// Information required to build for a target.
+#[derive(Clone, Default)]
+pub struct TargetConfig {
+    /// The path of archiver (lib builder) for this target.
+    pub ar: Option<PathBuf>,
+    /// The path of the linker for this target.
+    pub linker: Option<PathBuf>,
+    /// Special build options for any necessary input files (filename -> options).
+    pub overrides: HashMap<String, BuildOutput>,
+}
+
+impl TargetConfig {
+    pub fn new(config: &Config, triple: &str) -> CargoResult<TargetConfig> {
+        let key = format!("target.{}", triple);
+        let mut ret = TargetConfig {
+            ar: config.get_path(&format!("{}.ar", key))?.map(|v| v.val),
+            linker: config.get_path(&format!("{}.linker", key))?.map(|v| v.val),
+            overrides: HashMap::new(),
+        };
+        let table = match config.get_table(&key)? {
+            Some(table) => table.val,
+            None => return Ok(ret),
+        };
+        for (lib_name, value) in table {
+            match lib_name.as_str() {
+                "ar" | "linker" | "runner" | "rustflags" => continue,
+                _ => {}
+            }
+
+            let mut output = BuildOutput {
+                library_paths: Vec::new(),
+                library_links: Vec::new(),
+                cfgs: Vec::new(),
+                env: Vec::new(),
+                metadata: Vec::new(),
+                rerun_if_changed: Vec::new(),
+                rerun_if_env_changed: Vec::new(),
+                warnings: Vec::new(),
+            };
+            // We require deterministic order of evaluation, so we must sort the pairs by key first.
+            let mut pairs = Vec::new();
+            for (k, value) in value.table(&lib_name)?.0 {
+                pairs.push((k, value));
+            }
+            pairs.sort_by_key(|p| p.0);
+            for (k, value) in pairs {
+                let key = format!("{}.{}", key, k);
+                match &k[..] {
+                    "rustc-flags" => {
+                        let (flags, definition) = value.string(k)?;
+                        let whence = format!("in `{}` (in {})", key, definition.display());
+                        let (paths, links) = BuildOutput::parse_rustc_flags(flags, &whence)?;
+                        output.library_paths.extend(paths);
+                        output.library_links.extend(links);
+                    }
+                    "rustc-link-lib" => {
+                        let list = value.list(k)?;
+                        output
+                            .library_links
+                            .extend(list.iter().map(|v| v.0.clone()));
+                    }
+                    "rustc-link-search" => {
+                        let list = value.list(k)?;
+                        output
+                            .library_paths
+                            .extend(list.iter().map(|v| PathBuf::from(&v.0)));
+                    }
+                    "rustc-cfg" => {
+                        let list = value.list(k)?;
+                        output.cfgs.extend(list.iter().map(|v| v.0.clone()));
+                    }
+                    "rustc-env" => {
+                        for (name, val) in value.table(k)?.0 {
+                            let val = val.string(name)?.0;
+                            output.env.push((name.clone(), val.to_string()));
+                        }
+                    }
+                    "warning" | "rerun-if-changed" | "rerun-if-env-changed" => {
+                        failure::bail!("`{}` is not supported in build script overrides", k);
+                    }
+                    _ => {
+                        let val = value.string(k)?.0;
+                        output.metadata.push((k.clone(), val.to_string()));
+                    }
+                }
+            }
+            ret.overrides.insert(lib_name, output);
+        }
+
+        Ok(ret)
+    }
+}
+
+/// Acquire extra flags to pass to the compiler from various locations.
+///
+/// The locations are:
+///
+/// - the `RUSTFLAGS` environment variable
+///
+/// then if this was not found
+///
+/// - `target.*.rustflags` from the config (`.cargo/config`)
+/// - `target.cfg(..).rustflags` from the config
+///
+/// then if neither of these were found
+///
+/// - `build.rustflags` from the config
+///
+/// Note that if a `target` is specified, no args will be passed to host code (plugins, build
+/// scripts, ...), even if it is the same as the target.
+fn env_args(
+    config: &Config,
+    requested_target: &Option<String>,
+    host_triple: &str,
+    target_cfg: Option<&[Cfg]>,
+    kind: Kind,
+    name: &str,
+) -> CargoResult<Vec<String>> {
+    // We *want* to apply RUSTFLAGS only to builds for the
+    // requested target architecture, and not to things like build
+    // scripts and plugins, which may be for an entirely different
+    // architecture. Cargo's present architecture makes it quite
+    // hard to only apply flags to things that are not build
+    // scripts and plugins though, so we do something more hacky
+    // instead to avoid applying the same RUSTFLAGS to multiple targets
+    // arches:
+    //
+    // 1) If --target is not specified we just apply RUSTFLAGS to
+    // all builds; they are all going to have the same target.
+    //
+    // 2) If --target *is* specified then we only apply RUSTFLAGS
+    // to compilation units with the Target kind, which indicates
+    // it was chosen by the --target flag.
+    //
+    // This means that, e.g., even if the specified --target is the
+    // same as the host, build scripts and plugins won't get
+    // RUSTFLAGS.
+    let compiling_with_target = requested_target.is_some();
+    let is_target_kind = kind == Kind::Target;
+
+    if compiling_with_target && !is_target_kind {
+        // This is probably a build script or plugin and we're
+        // compiling with --target. In this scenario there are
+        // no rustflags we can apply.
+        return Ok(Vec::new());
+    }
+
+    // First try RUSTFLAGS from the environment
+    if let Ok(a) = env::var(name) {
+        let args = a
+            .split(' ')
+            .map(str::trim)
+            .filter(|s| !s.is_empty())
+            .map(str::to_string);
+        return Ok(args.collect());
+    }
+
+    let mut rustflags = Vec::new();
+
+    let name = name
+        .chars()
+        .flat_map(|c| c.to_lowercase())
+        .collect::<String>();
+    // Then the target.*.rustflags value...
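+    //
+    // For illustration (hypothetical values), given a config of
+    //
+    //     [target.x86_64-unknown-linux-gnu]
+    //     rustflags = ["-C", "target-cpu=native"]
+    //
+    //     [build]
+    //     rustflags = ["--cap-lints", "warn"]
+    //
+    // and no RUSTFLAGS in the environment, a build for that triple gets only
+    // the target-specific flags: `build.rustflags` is consulted only when
+    // the target-level lookups below produce nothing.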
+    let target = requested_target
+        .as_ref()
+        .map(|s| s.as_str())
+        .unwrap_or(host_triple);
+    let key = format!("target.{}.{}", target, name);
+    if let Some(args) = config.get_list_or_split_string(&key)? {
+        let args = args.val.into_iter();
+        rustflags.extend(args);
+    }
+    // ...including target.'cfg(...)'.rustflags
+    if let Some(target_cfg) = target_cfg {
+        if let Some(table) = config.get_table("target")? {
+            let cfgs = table
+                .val
+                .keys()
+                .filter(|key| CfgExpr::matches_key(key, target_cfg));
+
+            // Note that we may have multiple matching `[target]` sections and
+            // because we're passing flags to the compiler this can affect
+            // cargo's caching and whether it rebuilds. Ensure a deterministic
+            // ordering through sorting for now. We may perhaps one day wish to
+            // ensure a deterministic ordering via the order keys were defined
+            // in files perhaps.
+            let mut cfgs = cfgs.collect::<Vec<_>>();
+            cfgs.sort();
+
+            for n in cfgs {
+                let key = format!("target.{}.{}", n, name);
+                if let Some(args) = config.get_list_or_split_string(&key)? {
+                    let args = args.val.into_iter();
+                    rustflags.extend(args);
+                }
+            }
+        }
+    }
+
+    if !rustflags.is_empty() {
+        return Ok(rustflags);
+    }
+
+    // Then the `build.rustflags` value.
+    let key = format!("build.{}", name);
+    if let Some(args) = config.get_list_or_split_string(&key)? {
+        let args = args.val.into_iter();
+        return Ok(args.collect());
+    }
+
+    Ok(Vec::new())
+}
diff --git a/src/cargo/core/compiler/build_context/target_info.rs b/src/cargo/core/compiler/build_context/target_info.rs
new file mode 100644
index 000000000..7ae50eab1
--- /dev/null
+++ b/src/cargo/core/compiler/build_context/target_info.rs
@@ -0,0 +1,291 @@
+use std::cell::RefCell;
+use std::collections::hash_map::{Entry, HashMap};
+use std::path::PathBuf;
+use std::str::{self, FromStr};
+
+use super::env_args;
+use super::Kind;
+use crate::core::TargetKind;
+use crate::util::{CargoResult, CargoResultExt, Cfg, Config, ProcessBuilder, Rustc};
+
+#[derive(Clone)]
+pub struct TargetInfo {
+    crate_type_process: Option<ProcessBuilder>,
+    crate_types: RefCell<HashMap<String, Option<(String, String)>>>,
+    cfg: Option<Vec<Cfg>>,
+    pub sysroot_libdir: Option<PathBuf>,
+}
+
+/// Type of each file generated by a Unit.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum FileFlavor {
+    /// Not a special file type.
+    Normal,
+    /// Something you can link against (e.g., a library).
+    Linkable,
+    /// Piece of external debug information (e.g., `.dSYM`/`.pdb` file).
+    DebugInfo,
+}
+
+pub struct FileType {
+    pub flavor: FileFlavor,
+    suffix: String,
+    prefix: String,
+    // A wasm bin target will generate two files in deps, such as
+    // "web-stuff.js" and "web_stuff.wasm". Note the different usages of
+    // "-" and "_". `should_replace_hyphens` is a flag to indicate that
+    // we need to convert the stem "web-stuff" to "web_stuff", so we
+    // won't miss "web_stuff.wasm".
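+    //
+    // For a non-wasm illustration: an rlib for a (hypothetical) crate `foo`
+    // has prefix "lib" and suffix ".rlib", so `filename("foo")` yields
+    // "libfoo.rlib".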
+ should_replace_hyphens: bool, +} + +impl FileType { + pub fn filename(&self, stem: &str) -> String { + let stem = if self.should_replace_hyphens { + stem.replace("-", "_") + } else { + stem.to_string() + }; + format!("{}{}{}", self.prefix, stem, self.suffix) + } +} + +impl TargetInfo { + pub fn new( + config: &Config, + requested_target: &Option, + rustc: &Rustc, + kind: Kind, + ) -> CargoResult { + let rustflags = env_args( + config, + requested_target, + &rustc.host, + None, + kind, + "RUSTFLAGS", + )?; + let mut process = rustc.process(); + process + .arg("-") + .arg("--crate-name") + .arg("___") + .arg("--print=file-names") + .args(&rustflags) + .env_remove("RUST_LOG"); + + let target_triple = requested_target + .as_ref() + .map(|s| s.as_str()) + .unwrap_or(&rustc.host); + if kind == Kind::Target { + process.arg("--target").arg(target_triple); + } + + let crate_type_process = process.clone(); + const KNOWN_CRATE_TYPES: &[&str] = + &["bin", "rlib", "dylib", "cdylib", "staticlib", "proc-macro"]; + for crate_type in KNOWN_CRATE_TYPES.iter() { + process.arg("--crate-type").arg(crate_type); + } + + let mut with_cfg = process.clone(); + with_cfg.arg("--print=sysroot"); + with_cfg.arg("--print=cfg"); + + let mut has_cfg_and_sysroot = true; + let (output, error) = rustc + .cached_output(&with_cfg) + .or_else(|_| { + has_cfg_and_sysroot = false; + rustc.cached_output(&process) + }) + .chain_err(|| "failed to run `rustc` to learn about target-specific information")?; + + let mut lines = output.lines(); + let mut map = HashMap::new(); + for crate_type in KNOWN_CRATE_TYPES { + let out = parse_crate_type(crate_type, &error, &mut lines)?; + map.insert(crate_type.to_string(), out); + } + + let mut sysroot_libdir = None; + if has_cfg_and_sysroot { + let line = match lines.next() { + Some(line) => line, + None => failure::bail!( + "output of --print=sysroot missing when learning about \ + target-specific information from rustc" + ), + }; + let mut rustlib = PathBuf::from(line); + if kind == Kind::Host { + if cfg!(windows) { + rustlib.push("bin"); + } else { + rustlib.push("lib"); + } + sysroot_libdir = Some(rustlib); + } else { + rustlib.push("lib"); + rustlib.push("rustlib"); + rustlib.push(target_triple); + rustlib.push("lib"); + sysroot_libdir = Some(rustlib); + } + } + + let cfg = if has_cfg_and_sysroot { + Some(lines.map(Cfg::from_str).collect::>()?) + } else { + None + }; + + Ok(TargetInfo { + crate_type_process: Some(crate_type_process), + crate_types: RefCell::new(map), + cfg, + sysroot_libdir, + }) + } + + pub fn cfg(&self) -> Option<&[Cfg]> { + self.cfg.as_ref().map(|v| v.as_ref()) + } + + pub fn file_types( + &self, + crate_type: &str, + flavor: FileFlavor, + kind: &TargetKind, + target_triple: &str, + ) -> CargoResult>> { + let mut crate_types = self.crate_types.borrow_mut(); + let entry = crate_types.entry(crate_type.to_string()); + let crate_type_info = match entry { + Entry::Occupied(o) => &*o.into_mut(), + Entry::Vacant(v) => { + let value = self.discover_crate_type(v.key())?; + &*v.insert(value) + } + }; + let (prefix, suffix) = match *crate_type_info { + Some((ref prefix, ref suffix)) => (prefix, suffix), + None => return Ok(None), + }; + let mut ret = vec![FileType { + suffix: suffix.clone(), + prefix: prefix.clone(), + flavor, + should_replace_hyphens: false, + }]; + + // See rust-lang/cargo#4500. 
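The `rust-lang/cargo#4500` note above introduces the MSVC import-library special case handled next. Separately, `parse_crate_type` further down recovers a `(prefix, suffix)` pair from rustc's `--print=file-names` output by compiling a probe crate named `___`, so the marker can be split out of each printed file name. A hedged sketch of just that string manipulation (standalone helper, not Cargo's API):

```rust
// With `--crate-name ___`, rustc prints names like "lib___.rlib", so
// splitting on the "___" marker yields the platform prefix and suffix.
fn split_file_name(line: &str) -> Option<(&str, &str)> {
    let mut parts = line.trim().split("___");
    let prefix = parts.next()?;
    let suffix = parts.next()?;
    Some((prefix, suffix))
}

fn main() {
    assert_eq!(split_file_name("lib___.rlib"), Some(("lib", ".rlib")));
    assert_eq!(split_file_name("___.wasm"), Some(("", ".wasm")));
}
```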
+ if target_triple.ends_with("pc-windows-msvc") + && crate_type.ends_with("dylib") + && suffix == ".dll" + { + ret.push(FileType { + suffix: ".dll.lib".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::Normal, + should_replace_hyphens: false, + }) + } + + // See rust-lang/cargo#4535. + if target_triple.starts_with("wasm32-") && crate_type == "bin" && suffix == ".js" { + ret.push(FileType { + suffix: ".wasm".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::Normal, + should_replace_hyphens: true, + }) + } + + // See rust-lang/cargo#4490, rust-lang/cargo#4960. + // - Only uplift debuginfo for binaries. + // Tests are run directly from `target/debug/deps/` + // and examples are inside target/debug/examples/ which already have symbols next to them, + // so no need to do anything. + if *kind == TargetKind::Bin { + if target_triple.contains("-apple-") { + ret.push(FileType { + suffix: ".dSYM".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::DebugInfo, + should_replace_hyphens: false, + }) + } else if target_triple.ends_with("-msvc") { + ret.push(FileType { + suffix: ".pdb".to_string(), + prefix: prefix.clone(), + flavor: FileFlavor::DebugInfo, + should_replace_hyphens: false, + }) + } + } + + Ok(Some(ret)) + } + + fn discover_crate_type(&self, crate_type: &str) -> CargoResult> { + let mut process = self.crate_type_process.clone().unwrap(); + + process.arg("--crate-type").arg(crate_type); + + let output = process.exec_with_output().chain_err(|| { + format!( + "failed to run `rustc` to learn about \ + crate-type {} information", + crate_type + ) + })?; + + let error = str::from_utf8(&output.stderr).unwrap(); + let output = str::from_utf8(&output.stdout).unwrap(); + Ok(parse_crate_type(crate_type, error, &mut output.lines())?) + } +} + +/// Takes rustc output (using specialized command line args), and calculates the file prefix and +/// suffix for the given crate type, or returns `None` if the type is not supported. (e.g., for a +/// Rust library like `libcargo.rlib`, we have prefix "lib" and suffix "rlib"). +/// +/// The caller needs to ensure that the lines object is at the correct line for the given crate +/// type: this is not checked. +// +// This function can not handle more than one file per type (with wasm32-unknown-emscripten, there +// are two files for bin (`.wasm` and `.js`)). +fn parse_crate_type( + crate_type: &str, + error: &str, + lines: &mut str::Lines<'_>, +) -> CargoResult> { + let not_supported = error.lines().any(|line| { + (line.contains("unsupported crate type") || line.contains("unknown crate type")) + && line.contains(crate_type) + }); + if not_supported { + return Ok(None); + } + let line = match lines.next() { + Some(line) => line, + None => failure::bail!( + "malformed output when learning about \ + crate-type {} information", + crate_type + ), + }; + let mut parts = line.trim().split("___"); + let prefix = parts.next().unwrap(); + let suffix = match parts.next() { + Some(part) => part, + None => failure::bail!( + "output of --print=file-names has changed in \ + the compiler, cannot parse" + ), + }; + + Ok(Some((prefix.to_string(), suffix.to_string()))) +} diff --git a/src/cargo/core/compiler/build_plan.rs b/src/cargo/core/compiler/build_plan.rs new file mode 100644 index 000000000..d6c79d004 --- /dev/null +++ b/src/cargo/core/compiler/build_plan.rs @@ -0,0 +1,162 @@ +//! A graph-like structure used to represent the rustc commands to build the package and the +//! interdependencies between them. +//! +//! 
+//! The BuildPlan structure is used to store the dependency graph of a dry run so that it can be
+//! shared with an external build system. Each Invocation in the BuildPlan comprises a single
+//! subprocess and defines the build environment, the outputs produced by the subprocess, and the
+//! dependencies on other Invocations.
+
+use std::collections::BTreeMap;
+use std::path::PathBuf;
+
+use serde::Serialize;
+
+use super::context::OutputFile;
+use super::{CompileMode, Context, Kind, Unit};
+use crate::core::TargetKind;
+use crate::util::{internal, CargoResult, ProcessBuilder};
+
+#[derive(Debug, Serialize)]
+struct Invocation {
+    package_name: String,
+    package_version: semver::Version,
+    target_kind: TargetKind,
+    kind: Kind,
+    compile_mode: CompileMode,
+    deps: Vec<usize>,
+    outputs: Vec<PathBuf>,
+    links: BTreeMap<PathBuf, PathBuf>,
+    program: String,
+    args: Vec<String>,
+    env: BTreeMap<String, String>,
+    cwd: Option<PathBuf>,
+}
+
+#[derive(Debug)]
+pub struct BuildPlan {
+    invocation_map: BTreeMap<String, usize>,
+    plan: SerializedBuildPlan,
+}
+
+#[derive(Debug, Serialize)]
+struct SerializedBuildPlan {
+    invocations: Vec<Invocation>,
+    inputs: Vec<PathBuf>,
+}
+
+impl Invocation {
+    pub fn new(unit: &Unit<'_>, deps: Vec<usize>) -> Invocation {
+        let id = unit.pkg.package_id();
+        Invocation {
+            package_name: id.name().to_string(),
+            package_version: id.version().clone(),
+            kind: unit.kind,
+            target_kind: unit.target.kind().clone(),
+            compile_mode: unit.mode,
+            deps,
+            outputs: Vec::new(),
+            links: BTreeMap::new(),
+            program: String::new(),
+            args: Vec::new(),
+            env: BTreeMap::new(),
+            cwd: None,
+        }
+    }
+
+    pub fn add_output(&mut self, path: &PathBuf, link: &Option<PathBuf>) {
+        self.outputs.push(path.clone());
+        if let Some(ref link) = *link {
+            self.links.insert(link.clone(), path.clone());
+        }
+    }
+
+    pub fn update_cmd(&mut self, cmd: &ProcessBuilder) -> CargoResult<()> {
+        self.program = cmd
+            .get_program()
+            .to_str()
+            .ok_or_else(|| failure::format_err!("unicode program string required"))?
+            .to_string();
+        self.cwd = Some(cmd.get_cwd().unwrap().to_path_buf());
+        for arg in cmd.get_args().iter() {
+            self.args.push(
+                arg.to_str()
+                    .ok_or_else(|| failure::format_err!("unicode argument string required"))?
+                    .to_string(),
+            );
+        }
+        for (var, value) in cmd.get_envs() {
+            let value = match value {
+                Some(s) => s,
+                None => continue,
+            };
+            self.env.insert(
+                var.clone(),
+                value
+                    .to_str()
+                    .ok_or_else(|| failure::format_err!("unicode environment value required"))?
+ .to_string(), + ); + } + Ok(()) + } +} + +impl BuildPlan { + pub fn new() -> BuildPlan { + BuildPlan { + invocation_map: BTreeMap::new(), + plan: SerializedBuildPlan::new(), + } + } + + pub fn add(&mut self, cx: &Context<'_, '_>, unit: &Unit<'_>) -> CargoResult<()> { + let id = self.plan.invocations.len(); + self.invocation_map.insert(unit.buildkey(), id); + let deps = cx + .dep_targets(&unit) + .iter() + .map(|dep| self.invocation_map[&dep.buildkey()]) + .collect(); + let invocation = Invocation::new(unit, deps); + self.plan.invocations.push(invocation); + Ok(()) + } + + pub fn update( + &mut self, + invocation_name: &str, + cmd: &ProcessBuilder, + outputs: &[OutputFile], + ) -> CargoResult<()> { + let id = self.invocation_map[invocation_name]; + let invocation = + self.plan.invocations.get_mut(id).ok_or_else(|| { + internal(format!("couldn't find invocation for {}", invocation_name)) + })?; + + invocation.update_cmd(cmd)?; + for output in outputs.iter() { + invocation.add_output(&output.path, &output.hardlink); + } + + Ok(()) + } + + pub fn set_inputs(&mut self, inputs: Vec) { + self.plan.inputs = inputs; + } + + pub fn output_plan(self) { + let encoded = serde_json::to_string(&self.plan).unwrap(); + println!("{}", encoded); + } +} + +impl SerializedBuildPlan { + pub fn new() -> SerializedBuildPlan { + SerializedBuildPlan { + invocations: Vec::new(), + inputs: Vec::new(), + } + } +} diff --git a/src/cargo/core/compiler/compilation.rs b/src/cargo/core/compiler/compilation.rs new file mode 100644 index 000000000..863affc7c --- /dev/null +++ b/src/cargo/core/compiler/compilation.rs @@ -0,0 +1,310 @@ +use std::collections::{BTreeSet, HashMap, HashSet}; +use std::env; +use std::ffi::OsStr; +use std::path::PathBuf; + +use semver::Version; + +use super::BuildContext; +use crate::core::{Edition, Package, PackageId, Target, TargetKind}; +use crate::util::{self, join_paths, process, CargoResult, CfgExpr, Config, ProcessBuilder}; + +pub struct Doctest { + /// The package being doc-tested. + pub package: Package, + /// The target being tested (currently always the package's lib). + pub target: Target, + /// Extern dependencies needed by `rustdoc`. The path is the location of + /// the compiled lib. + pub deps: Vec<(String, PathBuf)>, +} + +/// A structure returning the result of a compilation. +pub struct Compilation<'cfg> { + /// An array of all tests created during this compilation. + pub tests: Vec<(Package, TargetKind, String, PathBuf)>, + + /// An array of all binaries created. + pub binaries: Vec, + + /// All directories for the output of native build commands. + /// + /// This is currently used to drive some entries which are added to the + /// LD_LIBRARY_PATH as appropriate. + /// + /// The order should be deterministic. + pub native_dirs: BTreeSet, + + /// Root output directory (for the local package's artifacts) + pub root_output: PathBuf, + + /// Output directory for rust dependencies. + /// May be for the host or for a specific target. + pub deps_output: PathBuf, + + /// Output directory for the rust host dependencies. + pub host_deps_output: PathBuf, + + /// The path to rustc's own libstd + pub host_dylib_path: Option, + + /// The path to libstd for the target + pub target_dylib_path: Option, + + /// Extra environment variables that were passed to compilations and should + /// be passed to future invocations of programs. + pub extra_env: HashMap>, + + /// Libraries to test with rustdoc. + pub to_doc_test: Vec, + + /// Features per package enabled during this compilation. 
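`BuildPlan::output_plan` above serializes the collected invocations with `serde_json` and prints them on stdout, which is what the dry-run build plan hands to an external build system. A toy sketch of the same emission pattern, with an abbreviated field set (the real `SerializedBuildPlan` carries full `Invocation` records); the `Compilation` struct resumes below:

```rust
// Assumes the serde (with derive) and serde_json crates, which the
// surrounding code already depends on.
use serde::Serialize;

#[derive(Serialize)]
struct ToyPlan {
    invocations: Vec<String>,
    inputs: Vec<String>,
}

fn main() {
    let plan = ToyPlan {
        invocations: vec!["rustc --crate-name foo src/lib.rs".to_string()],
        inputs: vec!["Cargo.toml".to_string()],
    };
    // Same pattern as `output_plan`: serialize, then print to stdout.
    println!("{}", serde_json::to_string(&plan).unwrap());
}
```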
+ pub cfgs: HashMap>, + + /// Flags to pass to rustdoc when invoked from cargo test, per package. + pub rustdocflags: HashMap>, + + pub host: String, + pub target: String, + + config: &'cfg Config, + rustc_process: ProcessBuilder, + + target_runner: Option<(PathBuf, Vec)>, +} + +impl<'cfg> Compilation<'cfg> { + pub fn new<'a>(bcx: &BuildContext<'a, 'cfg>) -> CargoResult> { + // If we're using cargo as a rustc wrapper then we're in a situation + // like `cargo fix`. For now just disregard the `RUSTC_WRAPPER` env var + // (which is typically set to `sccache` for now). Eventually we'll + // probably want to implement `RUSTC_WRAPPER` for `cargo fix`, but we'll + // leave that open as a bug for now. + let mut rustc = if bcx.build_config.cargo_as_rustc_wrapper { + let mut rustc = bcx.rustc.process_no_wrapper(); + let prog = rustc.get_program().to_owned(); + rustc.env("RUSTC", prog); + rustc.program(env::current_exe()?); + rustc + } else { + bcx.rustc.process() + }; + if bcx.config.extra_verbose() { + rustc.display_env_vars(); + } + for (k, v) in bcx.build_config.extra_rustc_env.iter() { + rustc.env(k, v); + } + for arg in bcx.build_config.extra_rustc_args.iter() { + rustc.arg(arg); + } + let srv = bcx.build_config.rustfix_diagnostic_server.borrow(); + if let Some(server) = &*srv { + server.configure(&mut rustc); + } + Ok(Compilation { + // TODO: deprecated; remove. + native_dirs: BTreeSet::new(), + root_output: PathBuf::from("/"), + deps_output: PathBuf::from("/"), + host_deps_output: PathBuf::from("/"), + host_dylib_path: bcx.host_info.sysroot_libdir.clone(), + target_dylib_path: bcx.target_info.sysroot_libdir.clone(), + tests: Vec::new(), + binaries: Vec::new(), + extra_env: HashMap::new(), + to_doc_test: Vec::new(), + cfgs: HashMap::new(), + rustdocflags: HashMap::new(), + config: bcx.config, + rustc_process: rustc, + host: bcx.host_triple().to_string(), + target: bcx.target_triple().to_string(), + target_runner: target_runner(&bcx)?, + }) + } + + /// See `process`. + pub fn rustc_process(&self, pkg: &Package, target: &Target) -> CargoResult { + let mut p = self.fill_env(self.rustc_process.clone(), pkg, true)?; + if target.edition() != Edition::Edition2015 { + p.arg(format!("--edition={}", target.edition())); + } + Ok(p) + } + + /// See `process`. + pub fn rustdoc_process(&self, pkg: &Package, target: &Target) -> CargoResult { + let mut p = self.fill_env(process(&*self.config.rustdoc()?), pkg, false)?; + if target.edition() != Edition::Edition2015 { + p.arg(format!("--edition={}", target.edition())); + } + Ok(p) + } + + /// See `process`. + pub fn host_process>( + &self, + cmd: T, + pkg: &Package, + ) -> CargoResult { + self.fill_env(process(cmd), pkg, true) + } + + fn target_runner(&self) -> &Option<(PathBuf, Vec)> { + &self.target_runner + } + + /// See `process`. + pub fn target_process>( + &self, + cmd: T, + pkg: &Package, + ) -> CargoResult { + let builder = if let Some((ref runner, ref args)) = *self.target_runner() { + let mut builder = process(runner); + builder.args(args); + builder.arg(cmd); + builder + } else { + process(cmd) + }; + self.fill_env(builder, pkg, false) + } + + /// Prepares a new process with an appropriate environment to run against + /// the artifacts produced by the build process. + /// + /// The package argument is also used to configure environment variables as + /// well as the working directory of the child process. 
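`target_process` above wraps the command in a configured runner when `target.<triple>.runner` is set, which is how cross-compiled binaries and tests can be executed under an emulator. A minimal sketch of that wrapping, assuming a hypothetical `qemu-aarch64` runner (the function name and types here are illustrative, not Cargo's); `fill_env` follows below:

```rust
use std::process::Command;

// When a runner is configured, the compiled artifact becomes an
// *argument* to the runner program rather than the program itself.
fn target_command(runner: Option<(&str, &[&str])>, bin: &str) -> Command {
    match runner {
        Some((prog, args)) => {
            let mut cmd = Command::new(prog); // e.g. "qemu-aarch64"
            cmd.args(args);
            cmd.arg(bin);
            cmd
        }
        None => Command::new(bin),
    }
}

fn main() {
    let cmd = target_command(
        Some(("qemu-aarch64", &["-L", "/usr/aarch64-linux-gnu"][..])),
        "target/debug/foo",
    );
    println!("{:?}", cmd);
}
```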
+ fn fill_env( + &self, + mut cmd: ProcessBuilder, + pkg: &Package, + is_host: bool, + ) -> CargoResult { + let mut search_path = if is_host { + let mut search_path = vec![self.host_deps_output.clone()]; + search_path.extend(self.host_dylib_path.clone()); + search_path + } else { + let mut search_path = + super::filter_dynamic_search_path(self.native_dirs.iter(), &self.root_output); + search_path.push(self.deps_output.clone()); + search_path.push(self.root_output.clone()); + search_path.extend(self.target_dylib_path.clone()); + search_path + }; + + search_path.extend(util::dylib_path().into_iter()); + if cfg!(target_os = "macos") { + // These are the defaults when DYLD_FALLBACK_LIBRARY_PATH isn't + // set. Since Cargo is explicitly setting the value, make sure the + // defaults still work. + if let Ok(home) = env::var("HOME") { + search_path.push(PathBuf::from(home).join("lib")); + } + search_path.push(PathBuf::from("/usr/local/lib")); + search_path.push(PathBuf::from("/lib")); + search_path.push(PathBuf::from("/usr/lib")); + } + let search_path = join_paths(&search_path, util::dylib_path_envvar())?; + + cmd.env(util::dylib_path_envvar(), &search_path); + if let Some(env) = self.extra_env.get(&pkg.package_id()) { + for &(ref k, ref v) in env { + cmd.env(k, v); + } + } + + let metadata = pkg.manifest().metadata(); + + let cargo_exe = self.config.cargo_exe()?; + cmd.env(crate::CARGO_ENV, cargo_exe); + + // When adding new environment variables depending on + // crate properties which might require rebuild upon change + // consider adding the corresponding properties to the hash + // in BuildContext::target_metadata() + cmd.env("CARGO_MANIFEST_DIR", pkg.root()) + .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string()) + .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string()) + .env("CARGO_PKG_VERSION_PATCH", &pkg.version().patch.to_string()) + .env( + "CARGO_PKG_VERSION_PRE", + &pre_version_component(pkg.version()), + ) + .env("CARGO_PKG_VERSION", &pkg.version().to_string()) + .env("CARGO_PKG_NAME", &*pkg.name()) + .env( + "CARGO_PKG_DESCRIPTION", + metadata.description.as_ref().unwrap_or(&String::new()), + ) + .env( + "CARGO_PKG_HOMEPAGE", + metadata.homepage.as_ref().unwrap_or(&String::new()), + ) + .env( + "CARGO_PKG_REPOSITORY", + metadata.repository.as_ref().unwrap_or(&String::new()), + ) + .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":")) + .cwd(pkg.root()); + Ok(cmd) + } +} + +fn pre_version_component(v: &Version) -> String { + if v.pre.is_empty() { + return String::new(); + } + + let mut ret = String::new(); + + for (i, x) in v.pre.iter().enumerate() { + if i != 0 { + ret.push('.') + }; + ret.push_str(&x.to_string()); + } + + ret +} + +fn target_runner(bcx: &BuildContext<'_, '_>) -> CargoResult)>> { + let target = bcx.target_triple(); + + // try target.{}.runner + let key = format!("target.{}.runner", target); + if let Some(v) = bcx.config.get_path_and_args(&key)? { + return Ok(Some(v.val)); + } + + // try target.'cfg(...)'.runner + if let Some(target_cfg) = bcx.target_info.cfg() { + if let Some(table) = bcx.config.get_table("target")? { + let mut matching_runner = None; + + for key in table.val.keys() { + if CfgExpr::matches_key(key, target_cfg) { + let key = format!("target.{}.runner", key); + if let Some(runner) = bcx.config.get_path_and_args(&key)? 
{ + // more than one match, error out + if matching_runner.is_some() { + failure::bail!( + "several matching instances of `target.'cfg(..)'.runner` \ + in `.cargo/config`" + ) + } + + matching_runner = Some(runner.val); + } + } + } + + return Ok(matching_runner); + } + } + + Ok(None) +} diff --git a/src/cargo/core/compiler/context/compilation_files.rs b/src/cargo/core/compiler/context/compilation_files.rs new file mode 100644 index 000000000..b84aad16c --- /dev/null +++ b/src/cargo/core/compiler/context/compilation_files.rs @@ -0,0 +1,523 @@ +use std::collections::HashMap; +use std::env; +use std::fmt; +use std::hash::{Hash, Hasher, SipHasher}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use lazycell::LazyCell; +use log::info; + +use super::{BuildContext, Context, FileFlavor, Kind, Layout, Unit}; +use crate::core::{TargetKind, Workspace}; +use crate::util::{self, CargoResult}; + +/// The `Metadata` is a hash used to make unique file names for each unit in a build. +/// For example: +/// - A project may depend on crate `A` and crate `B`, so the package name must be in the file name. +/// - Similarly a project may depend on two versions of `A`, so the version must be in the file name. +/// In general this must include all things that need to be distinguished in different parts of +/// the same build. This is absolutely required or we override things before +/// we get chance to use them. +/// +/// We use a hash because it is an easy way to guarantee +/// that all the inputs can be converted to a valid path. +/// +/// This also acts as the main layer of caching provided by Cargo. +/// For example, we want to cache `cargo build` and `cargo doc` separately, so that running one +/// does not invalidate the artifacts for the other. We do this by including `CompileMode` in the +/// hash, thus the artifacts go in different folders and do not override each other. +/// If we don't add something that we should have, for this reason, we get the +/// correct output but rebuild more than is needed. +/// +/// Some things that need to be tracked to ensure the correct output should definitely *not* +/// go in the `Metadata`. For example, the modification time of a file, should be tracked to make a +/// rebuild when the file changes. However, it would be wasteful to include in the `Metadata`. The +/// old artifacts are never going to be needed again. We can save space by just overwriting them. +/// If we add something that we should not have, for this reason, we get the correct output but take +/// more space than needed. This makes not including something in `Metadata` +/// a form of cache invalidation. +/// +/// Note that the `Fingerprint` is in charge of tracking everything needed to determine if a +/// rebuild is needed. +#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct Metadata(u64); + +impl fmt::Display for Metadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:016x}", self.0) + } +} + +pub struct CompilationFiles<'a, 'cfg: 'a> { + /// The target directory layout for the host (and target if it is the same as host). + pub(super) host: Layout, + /// The target directory layout for the target (if different from then host). + pub(super) target: Option, + /// Additional directory to include a copy of the outputs. + export_dir: Option, + /// The root targets requested by the user on the command line (does not + /// include dependencies). 
+ roots: Vec>, + ws: &'a Workspace<'cfg>, + metas: HashMap, Option>, + /// For each Unit, a list all files produced. + outputs: HashMap, LazyCell>>>, +} + +#[derive(Debug)] +pub struct OutputFile { + /// Absolute path to the file that will be produced by the build process. + pub path: PathBuf, + /// If it should be linked into `target`, and what it should be called + /// (e.g., without metadata). + pub hardlink: Option, + /// If `--out-dir` is specified, the absolute path to the exported file. + pub export_path: Option, + /// Type of the file (library / debug symbol / else). + pub flavor: FileFlavor, +} + +impl OutputFile { + /// Gets the hard link if present; otherwise, returns the path. + pub fn bin_dst(&self) -> &PathBuf { + match self.hardlink { + Some(ref link_dst) => link_dst, + None => &self.path, + } + } +} + +impl<'a, 'cfg: 'a> CompilationFiles<'a, 'cfg> { + pub(super) fn new( + roots: &[Unit<'a>], + host: Layout, + target: Option, + export_dir: Option, + ws: &'a Workspace<'cfg>, + cx: &Context<'a, 'cfg>, + ) -> CompilationFiles<'a, 'cfg> { + let mut metas = HashMap::new(); + for unit in roots { + metadata_of(unit, cx, &mut metas); + } + let outputs = metas + .keys() + .cloned() + .map(|unit| (unit, LazyCell::new())) + .collect(); + CompilationFiles { + ws, + host, + target, + export_dir, + roots: roots.to_vec(), + metas, + outputs, + } + } + + /// Returns the appropriate directory layout for either a plugin or not. + pub fn layout(&self, kind: Kind) -> &Layout { + match kind { + Kind::Host => &self.host, + Kind::Target => self.target.as_ref().unwrap_or(&self.host), + } + } + + /// Gets the metadata for a target in a specific profile. + /// We build to the path `"{filename}-{target_metadata}"`. + /// We use a linking step to link/copy to a predictable filename + /// like `target/debug/libfoo.{a,so,rlib}` and such. + pub fn metadata(&self, unit: &Unit<'a>) -> Option { + self.metas[unit].clone() + } + + /// Gets the short hash based only on the `PackageId`. + /// Used for the metadata when `target_metadata` returns `None`. + pub fn target_short_hash(&self, unit: &Unit<'_>) -> String { + let hashable = unit.pkg.package_id().stable_hash(self.ws.root()); + util::short_hash(&hashable) + } + + /// Returns the appropriate output directory for the specified package and + /// target. + pub fn out_dir(&self, unit: &Unit<'a>) -> PathBuf { + if unit.mode.is_doc() { + self.layout(unit.kind).root().parent().unwrap().join("doc") + } else if unit.target.is_custom_build() { + self.build_script_dir(unit) + } else if unit.target.is_example() { + self.layout(unit.kind).examples().to_path_buf() + } else { + self.deps_dir(unit).to_path_buf() + } + } + + pub fn export_dir(&self) -> Option { + self.export_dir.clone() + } + + pub fn pkg_dir(&self, unit: &Unit<'a>) -> String { + let name = unit.pkg.package_id().name(); + match self.metas[unit] { + Some(ref meta) => format!("{}-{}", name, meta), + None => format!("{}-{}", name, self.target_short_hash(unit)), + } + } + + /// Returns the root of the build output tree. + pub fn target_root(&self) -> &Path { + self.host.dest() + } + + pub fn host_deps(&self) -> &Path { + self.host.deps() + } + + /// Returns the directories where Rust crate dependencies are found for the + /// specified unit. 
+ pub fn deps_dir(&self, unit: &Unit<'_>) -> &Path { + self.layout(unit.kind).deps() + } + + pub fn fingerprint_dir(&self, unit: &Unit<'a>) -> PathBuf { + let dir = self.pkg_dir(unit); + self.layout(unit.kind).fingerprint().join(dir) + } + + /// Returns the appropriate directory layout for either a plugin or not. + pub fn build_script_dir(&self, unit: &Unit<'a>) -> PathBuf { + assert!(unit.target.is_custom_build()); + assert!(!unit.mode.is_run_custom_build()); + let dir = self.pkg_dir(unit); + self.layout(Kind::Host).build().join(dir) + } + + /// Returns the appropriate directory layout for either a plugin or not. + pub fn build_script_out_dir(&self, unit: &Unit<'a>) -> PathBuf { + assert!(unit.target.is_custom_build()); + assert!(unit.mode.is_run_custom_build()); + let dir = self.pkg_dir(unit); + self.layout(unit.kind).build().join(dir).join("out") + } + + /// Returns the file stem for a given target/profile combo (with metadata). + pub fn file_stem(&self, unit: &Unit<'a>) -> String { + match self.metas[unit] { + Some(ref metadata) => format!("{}-{}", unit.target.crate_name(), metadata), + None => self.bin_stem(unit), + } + } + + pub(super) fn outputs( + &self, + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, + ) -> CargoResult>> { + self.outputs[unit] + .try_borrow_with(|| self.calc_outputs(unit, bcx)) + .map(Arc::clone) + } + + /// Returns the bin stem for a given target (without metadata). + fn bin_stem(&self, unit: &Unit<'_>) -> String { + if unit.target.allows_underscores() { + unit.target.name().to_string() + } else { + unit.target.crate_name() + } + } + + /// Returns a tuple with the directory and name of the hard link we expect + /// our target to be copied to. Eg, file_stem may be out_dir/deps/foo-abcdef + /// and link_stem would be out_dir/foo + /// This function returns it in two parts so the caller can add prefix/suffix + /// to filename separately. + /// + /// Returns an `Option` because in some cases we don't want to link + /// (eg a dependent lib). + fn link_stem(&self, unit: &Unit<'a>) -> Option<(PathBuf, String)> { + let out_dir = self.out_dir(unit); + let bin_stem = self.bin_stem(unit); + let file_stem = self.file_stem(unit); + + // We currently only lift files up from the `deps` directory. If + // it was compiled into something like `example/` or `doc/` then + // we don't want to link it up. + if out_dir.ends_with("deps") { + // Don't lift up library dependencies. + if unit.target.is_bin() || self.roots.contains(unit) { + Some(( + out_dir.parent().unwrap().to_owned(), + if unit.mode.is_any_test() { + file_stem + } else { + bin_stem + }, + )) + } else { + None + } + } else if bin_stem == file_stem { + None + } else if out_dir.ends_with("examples") || out_dir.parent().unwrap().ends_with("build") { + Some((out_dir, bin_stem)) + } else { + None + } + } + + fn calc_outputs( + &self, + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, + ) -> CargoResult>> { + let out_dir = self.out_dir(unit); + let file_stem = self.file_stem(unit); + let link_stem = self.link_stem(unit); + let info = if unit.kind == Kind::Host { + &bcx.host_info + } else { + &bcx.target_info + }; + + let mut ret = Vec::new(); + let mut unsupported = Vec::new(); + { + if unit.mode.is_check() { + // This may be confusing. rustc outputs a file named `lib*.rmeta` + // for both libraries and binaries. 
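As the comment above notes, check builds emit a `lib`-prefixed `.rmeta` file regardless of whether the target is a library or a binary, which the code right after this note implements. A tiny sketch of the resulting name (the hash suffix is an illustrative placeholder, not a real metadata value):

```rust
// Mirrors the `format!("lib{}.rmeta", file_stem)` naming used below.
fn rmeta_name(file_stem: &str) -> String {
    format!("lib{}.rmeta", file_stem)
}

fn main() {
    // A binary target `foo` still produces a "lib"-prefixed rmeta file.
    assert_eq!(
        rmeta_name("foo-0c7e3dbb2f835a34"),
        "libfoo-0c7e3dbb2f835a34.rmeta"
    );
}
```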
+ let path = out_dir.join(format!("lib{}.rmeta", file_stem)); + ret.push(OutputFile { + path, + hardlink: None, + export_path: None, + flavor: FileFlavor::Linkable, + }); + } else { + let mut add = |crate_type: &str, flavor: FileFlavor| -> CargoResult<()> { + let crate_type = if crate_type == "lib" { + "rlib" + } else { + crate_type + }; + let file_types = info.file_types( + crate_type, + flavor, + unit.target.kind(), + bcx.target_triple(), + )?; + + match file_types { + Some(types) => { + for file_type in types { + let path = out_dir.join(file_type.filename(&file_stem)); + let hardlink = link_stem + .as_ref() + .map(|&(ref ld, ref ls)| ld.join(file_type.filename(ls))); + let export_path = if unit.target.is_custom_build() { + None + } else { + self.export_dir.as_ref().and_then(|export_dir| { + hardlink.as_ref().and_then(|hardlink| { + Some(export_dir.join(hardlink.file_name().unwrap())) + }) + }) + }; + ret.push(OutputFile { + path, + hardlink, + export_path, + flavor: file_type.flavor, + }); + } + } + // Not supported; don't worry about it. + None => { + unsupported.push(crate_type.to_string()); + } + } + Ok(()) + }; + // info!("{:?}", unit); + match *unit.target.kind() { + TargetKind::Bin + | TargetKind::CustomBuild + | TargetKind::ExampleBin + | TargetKind::Bench + | TargetKind::Test => { + add("bin", FileFlavor::Normal)?; + } + TargetKind::Lib(..) | TargetKind::ExampleLib(..) if unit.mode.is_any_test() => { + add("bin", FileFlavor::Normal)?; + } + TargetKind::ExampleLib(ref kinds) | TargetKind::Lib(ref kinds) => { + for kind in kinds { + add( + kind.crate_type(), + if kind.linkable() { + FileFlavor::Linkable + } else { + FileFlavor::Normal + }, + )?; + } + } + } + } + } + if ret.is_empty() { + if !unsupported.is_empty() { + failure::bail!( + "cannot produce {} for `{}` as the target `{}` \ + does not support these crate types", + unsupported.join(", "), + unit.pkg, + bcx.target_triple() + ) + } + failure::bail!( + "cannot compile `{}` as the target `{}` does not \ + support any of the output crate types", + unit.pkg, + bcx.target_triple() + ); + } + info!("Target filenames: {:?}", ret); + + Ok(Arc::new(ret)) + } +} + +fn metadata_of<'a, 'cfg>( + unit: &Unit<'a>, + cx: &Context<'a, 'cfg>, + metas: &mut HashMap, Option>, +) -> Option { + if !metas.contains_key(unit) { + let meta = compute_metadata(unit, cx, metas); + metas.insert(*unit, meta); + for unit in cx.dep_targets(unit) { + metadata_of(&unit, cx, metas); + } + } + metas[unit].clone() +} + +fn compute_metadata<'a, 'cfg>( + unit: &Unit<'a>, + cx: &Context<'a, 'cfg>, + metas: &mut HashMap, Option>, +) -> Option { + // No metadata for dylibs because of a couple issues: + // - macOS encodes the dylib name in the executable, + // - Windows rustc multiple files of which we can't easily link all of them. + // + // No metadata for bin because of an issue: + // - wasm32 rustc/emcc encodes the `.wasm` name in the `.js` (rust-lang/cargo#4535). + // + // Two exceptions: + // 1) Upstream dependencies (we aren't exporting + need to resolve name conflict), + // 2) `__CARGO_DEFAULT_LIB_METADATA` env var. + // + // Note, however, that the compiler's build system at least wants + // path dependencies (eg libstd) to have hashes in filenames. To account for + // that we have an extra hack here which reads the + // `__CARGO_DEFAULT_LIB_METADATA` environment variable and creates a + // hash in the filename if that's present. + // + // This environment variable should not be relied on! It's + // just here for rustbuild. 
We need a more principled method + // doing this eventually. + let bcx = &cx.bcx; + let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA"); + if !(unit.mode.is_any_test() || unit.mode.is_check()) + && (unit.target.is_dylib() + || unit.target.is_cdylib() + || (unit.target.is_bin() && bcx.target_triple().starts_with("wasm32-"))) + && unit.pkg.package_id().source_id().is_path() + && __cargo_default_lib_metadata.is_err() + { + return None; + } + + let mut hasher = SipHasher::new_with_keys(0, 0); + + // This is a generic version number that can be changed to make + // backwards-incompatible changes to any file structures in the output + // directory. For example, the fingerprint files or the build-script + // output files. Normally cargo updates ship with rustc updates which will + // cause a new hash due to the rustc version changing, but this allows + // cargo to be extra careful to deal with different versions of cargo that + // use the same rustc version. + 1.hash(&mut hasher); + + // Unique metadata per (name, source, version) triple. This'll allow us + // to pull crates from anywhere without worrying about conflicts. + unit.pkg + .package_id() + .stable_hash(bcx.ws.root()) + .hash(&mut hasher); + + // Add package properties which map to environment variables + // exposed by Cargo. + let manifest_metadata = unit.pkg.manifest().metadata(); + manifest_metadata.authors.hash(&mut hasher); + manifest_metadata.description.hash(&mut hasher); + manifest_metadata.homepage.hash(&mut hasher); + + // Also mix in enabled features to our metadata. This'll ensure that + // when changing feature sets each lib is separately cached. + bcx.resolve + .features_sorted(unit.pkg.package_id()) + .hash(&mut hasher); + + // Mix in the target-metadata of all the dependencies of this target. + { + let mut deps_metadata = cx + .dep_targets(unit) + .iter() + .map(|dep| metadata_of(dep, cx, metas)) + .collect::>(); + deps_metadata.sort(); + deps_metadata.hash(&mut hasher); + } + + // Throw in the profile we're compiling with. This helps caching + // `panic=abort` and `panic=unwind` artifacts, additionally with various + // settings like debuginfo and whatnot. + unit.profile.hash(&mut hasher); + unit.mode.hash(&mut hasher); + if let Some(ref args) = bcx.extra_args_for(unit) { + args.hash(&mut hasher); + } + + // Throw in the rustflags we're compiling with. + // This helps when the target directory is a shared cache for projects with different cargo configs, + // or if the user is experimenting with different rustflags manually. + if unit.mode.is_doc() { + cx.bcx.rustdocflags_args(unit).ok().hash(&mut hasher); + } else { + cx.bcx.rustflags_args(unit).ok().hash(&mut hasher); + } + + // Artifacts compiled for the host should have a different metadata + // piece than those compiled for the target, so make sure we throw in + // the unit's `kind` as well + unit.kind.hash(&mut hasher); + + // Finally throw in the target name/kind. This ensures that concurrent + // compiles of targets in the same crate don't collide. + unit.target.name().hash(&mut hasher); + unit.target.kind().hash(&mut hasher); + + bcx.rustc.verbose_version.hash(&mut hasher); + + // Seed the contents of `__CARGO_DEFAULT_LIB_METADATA` to the hasher if present. + // This should be the release channel, to get a different hash for each channel. 
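`compute_metadata` above funnels every distinguishing input into a `SipHasher`, and the result becomes the 16-hex-digit filename suffix that keeps artifacts from colliding in `deps`. A reduced sketch with a hypothetical input set (the real function hashes many more things, as shown above); the release-channel seeding described in the comment continues right after this sketch:

```rust
#![allow(deprecated)] // SipHasher is deprecated in std; used to mirror the code above
use std::hash::{Hash, Hasher, SipHasher};

fn metadata(pkg: &str, version: &str, features: &[&str], profile: &str) -> String {
    let mut hasher = SipHasher::new_with_keys(0, 0);
    pkg.hash(&mut hasher);
    version.hash(&mut hasher);
    features.hash(&mut hasher);
    profile.hash(&mut hasher);
    format!("{:016x}", hasher.finish())
}

fn main() {
    // A different feature set yields a different suffix, so both artifacts
    // can live side by side in target/debug/deps.
    assert_ne!(
        metadata("serde", "1.0.90", &["derive"], "dev"),
        metadata("serde", "1.0.90", &[], "dev")
    );
}
```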
+ if let Ok(ref channel) = __cargo_default_lib_metadata { + channel.hash(&mut hasher); + } + Some(Metadata(hasher.finish())) +} diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs new file mode 100644 index 000000000..27f8adb20 --- /dev/null +++ b/src/cargo/core/compiler/context/mod.rs @@ -0,0 +1,568 @@ +#![allow(deprecated)] +use std::collections::{HashMap, HashSet}; +use std::ffi::OsStr; +use std::fmt::Write; +use std::path::PathBuf; +use std::sync::Arc; + +use jobserver::Client; + +use crate::core::compiler::compilation; +use crate::core::profiles::Profile; +use crate::core::{Package, PackageId, Resolve, Target}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{internal, profile, short_hash, Config}; + +use super::build_plan::BuildPlan; +use super::custom_build::{self, BuildDeps, BuildScripts, BuildState}; +use super::fingerprint::Fingerprint; +use super::job_queue::JobQueue; +use super::layout::Layout; +use super::{BuildContext, Compilation, CompileMode, Executor, FileFlavor, Kind}; + +mod unit_dependencies; +use self::unit_dependencies::build_unit_dependencies; + +mod compilation_files; +use self::compilation_files::CompilationFiles; +pub use self::compilation_files::{Metadata, OutputFile}; + +/// All information needed to define a unit. +/// +/// A unit is an object that has enough information so that cargo knows how to build it. +/// For example, if your package has dependencies, then every dependency will be built as a library +/// unit. If your package is a library, then it will be built as a library unit as well, or if it +/// is a binary with `main.rs`, then a binary will be output. There are also separate unit types +/// for `test`ing and `check`ing, amongst others. +/// +/// The unit also holds information about all possible metadata about the package in `pkg`. +/// +/// A unit needs to know extra information in addition to the type and root source file. For +/// example, it needs to know the target architecture (OS, chip arch etc.) and it needs to know +/// whether you want a debug or release build. There is enough information in this struct to figure +/// all that out. +#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, PartialOrd, Ord)] +pub struct Unit<'a> { + /// Information about available targets, which files to include/exclude, etc. Basically stuff in + /// `Cargo.toml`. + pub pkg: &'a Package, + /// Information about the specific target to build, out of the possible targets in `pkg`. Not + /// to be confused with *target-triple* (or *target architecture* ...), the target arch for a + /// build. + pub target: &'a Target, + /// The profile contains information about *how* the build should be run, including debug + /// level, etc. + pub profile: Profile, + /// Whether this compilation unit is for the host or target architecture. + /// + /// For example, when + /// cross compiling and using a custom build script, the build script needs to be compiled for + /// the host architecture so the host rustc can use it (when compiling to the target + /// architecture). + pub kind: Kind, + /// The "mode" this unit is being compiled for. See [`CompileMode`] for more details. 
+ pub mode: CompileMode, +} + +impl<'a> Unit<'a> { + pub fn buildkey(&self) -> String { + format!("{}-{}", self.pkg.name(), short_hash(self)) + } +} + +pub struct Context<'a, 'cfg: 'a> { + pub bcx: &'a BuildContext<'a, 'cfg>, + pub compilation: Compilation<'cfg>, + pub build_state: Arc, + pub build_script_overridden: HashSet<(PackageId, Kind)>, + pub build_explicit_deps: HashMap, BuildDeps>, + pub fingerprints: HashMap, Arc>, + pub compiled: HashSet>, + pub build_scripts: HashMap, Arc>, + pub links: Links, + pub jobserver: Client, + primary_packages: HashSet, + unit_dependencies: HashMap, Vec>>, + files: Option>, + package_cache: HashMap, +} + +impl<'a, 'cfg> Context<'a, 'cfg> { + pub fn new(config: &'cfg Config, bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult { + // Load up the jobserver that we'll use to manage our parallelism. This + // is the same as the GNU make implementation of a jobserver, and + // intentionally so! It's hoped that we can interact with GNU make and + // all share the same jobserver. + // + // Note that if we don't have a jobserver in our environment then we + // create our own, and we create it with `n-1` tokens because one token + // is ourself, a running process. + let jobserver = match config.jobserver_from_env() { + Some(c) => c.clone(), + None => Client::new(bcx.build_config.jobs as usize - 1) + .chain_err(|| "failed to create jobserver")?, + }; + + Ok(Self { + bcx, + compilation: Compilation::new(bcx)?, + build_state: Arc::new(BuildState::new(&bcx.host_config, &bcx.target_config)), + fingerprints: HashMap::new(), + compiled: HashSet::new(), + build_scripts: HashMap::new(), + build_explicit_deps: HashMap::new(), + links: Links::new(), + jobserver, + build_script_overridden: HashSet::new(), + + primary_packages: HashSet::new(), + unit_dependencies: HashMap::new(), + files: None, + package_cache: HashMap::new(), + }) + } + + // Returns a mapping of the root package plus its immediate dependencies to + // where the compiled libraries are all located. + pub fn compile( + mut self, + units: &[Unit<'a>], + export_dir: Option, + exec: &Arc, + ) -> CargoResult> { + let mut queue = JobQueue::new(self.bcx); + let mut plan = BuildPlan::new(); + let build_plan = self.bcx.build_config.build_plan; + self.prepare_units(export_dir, units)?; + self.prepare()?; + custom_build::build_map(&mut self, units)?; + self.check_collistions()?; + + for unit in units.iter() { + // Build up a list of pending jobs, each of which represent + // compiling a particular package. No actual work is executed as + // part of this, that's all done next as part of the `execute` + // function which will run everything in order with proper + // parallelism. + let force_rebuild = self.bcx.build_config.force_rebuild; + super::compile(&mut self, &mut queue, &mut plan, unit, exec, force_rebuild)?; + } + + // Now that we've figured out everything that we're going to do, do it! 
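`Context::new` above inherits a GNU-make-compatible jobserver from the environment when one exists, and otherwise creates a fresh one with `jobs - 1` tokens because the current process already counts as one running job. A minimal sketch of that decision using the same `jobserver` crate (the helper name is illustrative); queue execution continues below:

```rust
use jobserver::Client;

fn acquire_jobserver(jobs: usize) -> std::io::Result<Client> {
    // `from_env` is unsafe because it trusts file descriptors inherited
    // from the parent process (e.g. GNU make).
    match unsafe { Client::from_env() } {
        Some(client) => Ok(client),
        // One token is implicitly held by this process, hence `jobs - 1`.
        None => Client::new(jobs - 1),
    }
}

fn main() {
    let client = acquire_jobserver(4).expect("failed to create jobserver");
    let _token = client.acquire().expect("failed to acquire token");
}
```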
+ queue.execute(&mut self, &mut plan)?; + + if build_plan { + plan.set_inputs(self.build_plan_inputs()?); + plan.output_plan(); + } + + for unit in units.iter() { + for output in self.outputs(unit)?.iter() { + if output.flavor == FileFlavor::DebugInfo { + continue; + } + + let bindst = output.bin_dst(); + + if unit.mode == CompileMode::Test { + self.compilation.tests.push(( + unit.pkg.clone(), + unit.target.kind().clone(), + unit.target.name().to_string(), + output.path.clone(), + )); + } else if unit.target.is_bin() || unit.target.is_bin_example() { + self.compilation.binaries.push(bindst.clone()); + } + } + + for dep in self.dep_targets(unit).iter() { + if !unit.target.is_lib() { + continue; + } + + if dep.mode.is_run_custom_build() { + let out_dir = self.files().build_script_out_dir(dep).display().to_string(); + self.compilation + .extra_env + .entry(dep.pkg.package_id()) + .or_insert_with(Vec::new) + .push(("OUT_DIR".to_string(), out_dir)); + } + } + + if unit.mode == CompileMode::Doctest { + // Note that we can *only* doc-test rlib outputs here. A + // staticlib output cannot be linked by the compiler (it just + // doesn't do that). A dylib output, however, can be linked by + // the compiler, but will always fail. Currently all dylibs are + // built as "static dylibs" where the standard library is + // statically linked into the dylib. The doc tests fail, + // however, for now as they try to link the standard library + // dynamically as well, causing problems. As a result we only + // pass `--extern` for rlib deps and skip out on all other + // artifacts. + let mut doctest_deps = Vec::new(); + for dep in self.dep_targets(unit) { + if dep.target.is_lib() && dep.mode == CompileMode::Build { + let outputs = self.outputs(&dep)?; + let outputs = outputs.iter().filter(|output| { + output.path.extension() == Some(OsStr::new("rlib")) + || dep.target.for_host() + }); + for output in outputs { + doctest_deps.push(( + self.bcx.extern_crate_name(unit, &dep)?, + output.path.clone(), + )); + } + } + } + // Help with tests to get a stable order with renamed deps. + doctest_deps.sort(); + self.compilation.to_doc_test.push(compilation::Doctest { + package: unit.pkg.clone(), + target: unit.target.clone(), + deps: doctest_deps, + }); + } + + let feats = self.bcx.resolve.features(unit.pkg.package_id()); + if !feats.is_empty() { + self.compilation + .cfgs + .entry(unit.pkg.package_id()) + .or_insert_with(|| { + feats + .iter() + .map(|feat| format!("feature=\"{}\"", feat)) + .collect() + }); + } + let rustdocflags = self.bcx.rustdocflags_args(unit)?; + if !rustdocflags.is_empty() { + self.compilation + .rustdocflags + .entry(unit.pkg.package_id()) + .or_insert(rustdocflags); + } + + super::output_depinfo(&mut self, unit)?; + } + + for (&(ref pkg, _), output) in self.build_state.outputs.lock().unwrap().iter() { + self.compilation + .cfgs + .entry(pkg.clone()) + .or_insert_with(HashSet::new) + .extend(output.cfgs.iter().cloned()); + + self.compilation + .extra_env + .entry(pkg.clone()) + .or_insert_with(Vec::new) + .extend(output.env.iter().cloned()); + + for dir in output.library_paths.iter() { + self.compilation.native_dirs.insert(dir.clone()); + } + } + Ok(self.compilation) + } + + /// Returns the executable for the specified unit (if any). 
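The feature handling above exports each enabled feature as a `feature="name"` cfg string, which later test and rustdoc invocations receive. A small sketch of that mapping (standalone helper, not Cargo's API); `get_executable` follows below:

```rust
// Mirrors the `format!("feature=\"{}\"", feat)` mapping shown above.
fn feature_cfgs(features: &[&str]) -> Vec<String> {
    features
        .iter()
        .map(|feat| format!("feature=\"{}\"", feat))
        .collect()
}

fn main() {
    assert_eq!(
        feature_cfgs(&["derive", "std"]),
        vec!["feature=\"derive\"", "feature=\"std\""]
    );
}
```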
+ pub fn get_executable(&mut self, unit: &Unit<'a>) -> CargoResult> { + for output in self.outputs(unit)?.iter() { + if output.flavor == FileFlavor::DebugInfo { + continue; + } + + let is_binary = unit.target.is_bin() || unit.target.is_bin_example(); + let is_test = unit.mode.is_any_test() && !unit.mode.is_check(); + + if is_binary || is_test { + return Ok(Option::Some(output.bin_dst().clone())); + } + } + Ok(None) + } + + pub fn prepare_units( + &mut self, + export_dir: Option, + units: &[Unit<'a>], + ) -> CargoResult<()> { + let dest = if self.bcx.build_config.release { + "release" + } else { + "debug" + }; + let host_layout = Layout::new(self.bcx.ws, None, dest)?; + let target_layout = match self.bcx.build_config.requested_target.as_ref() { + Some(target) => Some(Layout::new(self.bcx.ws, Some(target), dest)?), + None => None, + }; + self.primary_packages + .extend(units.iter().map(|u| u.pkg.package_id())); + + build_unit_dependencies( + units, + self.bcx, + &mut self.unit_dependencies, + &mut self.package_cache, + )?; + let files = CompilationFiles::new( + units, + host_layout, + target_layout, + export_dir, + self.bcx.ws, + self, + ); + self.files = Some(files); + Ok(()) + } + + /// Prepare this context, ensuring that all filesystem directories are in + /// place. + pub fn prepare(&mut self) -> CargoResult<()> { + let _p = profile::start("preparing layout"); + + self.files_mut() + .host + .prepare() + .chain_err(|| internal("couldn't prepare build directories"))?; + if let Some(ref mut target) = self.files.as_mut().unwrap().target { + target + .prepare() + .chain_err(|| internal("couldn't prepare build directories"))?; + } + + self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf(); + + let files = self.files.as_ref().unwrap(); + let layout = files.target.as_ref().unwrap_or(&files.host); + self.compilation.root_output = layout.dest().to_path_buf(); + self.compilation.deps_output = layout.deps().to_path_buf(); + Ok(()) + } + + pub fn files(&self) -> &CompilationFiles<'a, 'cfg> { + self.files.as_ref().unwrap() + } + + fn files_mut(&mut self) -> &mut CompilationFiles<'a, 'cfg> { + self.files.as_mut().unwrap() + } + + /// Returns the filenames that the given unit will generate. + pub fn outputs(&self, unit: &Unit<'a>) -> CargoResult>> { + self.files.as_ref().unwrap().outputs(unit, self.bcx) + } + + /// For a package, return all targets which are registered as dependencies + /// for that package. + // + // TODO: this ideally should be `-> &[Unit<'a>]`. + pub fn dep_targets(&self, unit: &Unit<'a>) -> Vec> { + // If this build script's execution has been overridden then we don't + // actually depend on anything, we've reached the end of the dependency + // chain as we've got all the info we're gonna get. + // + // Note there's a subtlety about this piece of code! The + // `build_script_overridden` map here is populated in + // `custom_build::build_map` which you need to call before inspecting + // dependencies. However, that code itself calls this method and + // gets a full pre-filtered set of dependencies. This is not super + // obvious, and clear, but it does work at the moment. 
+ if unit.target.is_custom_build() { + let key = (unit.pkg.package_id(), unit.kind); + if self.build_script_overridden.contains(&key) { + return Vec::new(); + } + } + let mut deps = self.unit_dependencies[unit].clone(); + deps.sort(); + deps + } + + pub fn is_primary_package(&self, unit: &Unit<'a>) -> bool { + self.primary_packages.contains(&unit.pkg.package_id()) + } + + /// Gets a package for the given package ID. + pub fn get_package(&self, id: PackageId) -> CargoResult<&'a Package> { + self.package_cache + .get(&id) + .cloned() + .ok_or_else(|| failure::format_err!("failed to find {}", id)) + } + + /// Returns the list of filenames read by cargo to generate the `BuildContext` + /// (all `Cargo.toml`, etc.). + pub fn build_plan_inputs(&self) -> CargoResult> { + let mut inputs = Vec::new(); + // Note that we're using the `package_cache`, which should have been + // populated by `build_unit_dependencies`, and only those packages are + // considered as all the inputs. + // + // (Notably, we skip dev-deps here if they aren't present.) + for pkg in self.package_cache.values() { + inputs.push(pkg.manifest_path().to_path_buf()); + } + inputs.sort(); + Ok(inputs) + } + + fn check_collistions(&self) -> CargoResult<()> { + let mut output_collisions = HashMap::new(); + let describe_collision = + |unit: &Unit<'_>, other_unit: &Unit<'_>, path: &PathBuf| -> String { + format!( + "The {} target `{}` in package `{}` has the same output \ + filename as the {} target `{}` in package `{}`.\n\ + Colliding filename is: {}\n", + unit.target.kind().description(), + unit.target.name(), + unit.pkg.package_id(), + other_unit.target.kind().description(), + other_unit.target.name(), + other_unit.pkg.package_id(), + path.display() + ) + }; + let suggestion = "Consider changing their names to be unique or compiling them separately.\n\ + This may become a hard error in the future; see \ + ."; + let report_collision = |unit: &Unit<'_>, + other_unit: &Unit<'_>, + path: &PathBuf| + -> CargoResult<()> { + if unit.target.name() == other_unit.target.name() { + self.bcx.config.shell().warn(format!( + "output filename collision.\n\ + {}\ + The targets should have unique names.\n\ + {}", + describe_collision(unit, other_unit, path), + suggestion + )) + } else { + self.bcx.config.shell().warn(format!( + "output filename collision.\n\ + {}\ + The output filenames should be unique.\n\ + {}\n\ + If this looks unexpected, it may be a bug in Cargo. Please file a bug report at\n\ + https://github.com/rust-lang/cargo/issues/ with as much information as you\n\ + can provide.\n\ + {} running on `{}` target `{}`\n\ + First unit: {:?}\n\ + Second unit: {:?}", + describe_collision(unit, other_unit, path), + suggestion, + crate::version(), self.bcx.host_triple(), self.bcx.target_triple(), + unit, other_unit)) + } + }; + let mut keys = self + .unit_dependencies + .keys() + .filter(|unit| !unit.mode.is_run_custom_build()) + .collect::>(); + // Sort for consistent error messages. 
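The collision check above relies on `HashMap::insert` returning the previously stored value for a key, so a `Some` result means two units claimed the same output path; the sort noted above only makes the warning order deterministic. A minimal demonstration of the pattern:

```rust
use std::collections::HashMap;

fn main() {
    let mut claimed: HashMap<&str, &str> = HashMap::new();
    assert_eq!(claimed.insert("target/debug/foo", "bin `foo`"), None);
    // A second claimant for the same path surfaces the first one, which
    // is exactly the point at which a collision warning is reported.
    assert_eq!(
        claimed.insert("target/debug/foo", "example `foo`"),
        Some("bin `foo`")
    );
}
```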
+ keys.sort_unstable(); + for unit in keys { + for output in self.outputs(unit)?.iter() { + if let Some(other_unit) = output_collisions.insert(output.path.clone(), unit) { + report_collision(unit, &other_unit, &output.path)?; + } + if let Some(hardlink) = output.hardlink.as_ref() { + if let Some(other_unit) = output_collisions.insert(hardlink.clone(), unit) { + report_collision(unit, &other_unit, hardlink)?; + } + } + if let Some(ref export_path) = output.export_path { + if let Some(other_unit) = output_collisions.insert(export_path.clone(), unit) { + self.bcx.config.shell().warn(format!( + "`--out-dir` filename collision.\n\ + {}\ + The exported filenames should be unique.\n\ + {}", + describe_collision(unit, &other_unit, &export_path), + suggestion + ))?; + } + } + } + } + Ok(()) + } +} + +#[derive(Default)] +pub struct Links { + validated: HashSet, + links: HashMap, +} + +impl Links { + pub fn new() -> Links { + Links { + validated: HashSet::new(), + links: HashMap::new(), + } + } + + pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'_>) -> CargoResult<()> { + if !self.validated.insert(unit.pkg.package_id()) { + return Ok(()); + } + let lib = match unit.pkg.manifest().links() { + Some(lib) => lib, + None => return Ok(()), + }; + if let Some(&prev) = self.links.get(lib) { + let pkg = unit.pkg.package_id(); + + let describe_path = |pkgid: PackageId| -> String { + let dep_path = resolve.path_to_top(&pkgid); + let mut dep_path_desc = format!("package `{}`", dep_path[0]); + for dep in dep_path.iter().skip(1) { + write!(dep_path_desc, "\n ... which is depended on by `{}`", dep).unwrap(); + } + dep_path_desc + }; + + failure::bail!( + "multiple packages link to native library `{}`, \ + but a native library can be linked only once\n\ + \n\ + {}\nlinks to native library `{}`\n\ + \n\ + {}\nalso links to native library `{}`", + lib, + describe_path(prev), + lib, + describe_path(pkg), + lib + ) + } + if !unit + .pkg + .manifest() + .targets() + .iter() + .any(|t| t.is_custom_build()) + { + failure::bail!( + "package `{}` specifies that it links to `{}` but does not \ + have a custom build script", + unit.pkg.package_id(), + lib + ) + } + self.links.insert(lib.to_string(), unit.pkg.package_id()); + Ok(()) + } +} diff --git a/src/cargo/core/compiler/context/unit_dependencies.rs b/src/cargo/core/compiler/context/unit_dependencies.rs new file mode 100644 index 000000000..7ba1aaa38 --- /dev/null +++ b/src/cargo/core/compiler/context/unit_dependencies.rs @@ -0,0 +1,543 @@ +//! Constructs the dependency graph for compilation. +//! +//! Rust code is typically organized as a set of Cargo packages. The +//! dependencies between the packages themselves are stored in the +//! `Resolve` struct. However, we can't use that information as is for +//! compilation! A package typically contains several targets, or crates, +//! and these targets has inter-dependencies. For example, you need to +//! compile the `lib` target before the `bin` one, and you need to compile +//! `build.rs` before either of those. +//! +//! So, we need to lower the `Resolve`, which specifies dependencies between +//! *packages*, to a graph of dependencies between their *targets*, and this +//! is exactly what this module is doing! Well, almost exactly: another +//! complication is that we might want to compile the same target several times +//! (for example, with and without tests), so we actually build a dependency +//! graph of `Unit`s, which capture these properties. 
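The module documentation above describes lowering the package-level `Resolve` graph to a unit-level graph. A toy illustration of that fan-out (the types and names here are hypothetical stand-ins, not Cargo's): one package with a build script, a lib, and a bin expands into three units with their own edges.

```rust
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct ToyUnit {
    pkg: &'static str,
    target: &'static str, // "custom-build", "lib", "bin", ...
}

fn main() {
    let build = ToyUnit { pkg: "foo", target: "custom-build" };
    let lib = ToyUnit { pkg: "foo", target: "lib" };
    let bin = ToyUnit { pkg: "foo", target: "bin" };
    // The lib waits on the build script run; the bin links against the lib.
    let edges = vec![(lib.clone(), build), (bin, lib)];
    for (from, to) in &edges {
        println!("{:?} -> {:?}", from, to);
    }
}
```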
+ +use std::cell::RefCell; +use std::collections::{HashMap, HashSet}; + +use log::trace; + +use super::{BuildContext, CompileMode, Kind, Unit}; +use crate::core::dependency::Kind as DepKind; +use crate::core::package::Downloads; +use crate::core::profiles::UnitFor; +use crate::core::{Package, PackageId, Target}; +use crate::CargoResult; + +struct State<'a: 'tmp, 'cfg: 'a, 'tmp> { + bcx: &'tmp BuildContext<'a, 'cfg>, + deps: &'tmp mut HashMap, Vec>>, + pkgs: RefCell<&'tmp mut HashMap>, + waiting_on_download: HashSet, + downloads: Downloads<'a, 'cfg>, +} + +pub fn build_unit_dependencies<'a, 'cfg>( + roots: &[Unit<'a>], + bcx: &BuildContext<'a, 'cfg>, + deps: &mut HashMap, Vec>>, + pkgs: &mut HashMap, +) -> CargoResult<()> { + assert!(deps.is_empty(), "can only build unit deps once"); + + let mut state = State { + bcx, + deps, + pkgs: RefCell::new(pkgs), + waiting_on_download: HashSet::new(), + downloads: bcx.packages.enable_download()?, + }; + + loop { + for unit in roots.iter() { + state.get(unit.pkg.package_id())?; + + // Dependencies of tests/benches should not have `panic` set. + // We check the global test mode to see if we are running in `cargo + // test` in which case we ensure all dependencies have `panic` + // cleared, and avoid building the lib thrice (once with `panic`, once + // without, once for `--test`). In particular, the lib included for + // Doc tests and examples are `Build` mode here. + let unit_for = if unit.mode.is_any_test() || bcx.build_config.test() { + UnitFor::new_test() + } else if unit.target.is_custom_build() { + // This normally doesn't happen, except `clean` aggressively + // generates all units. + UnitFor::new_build() + } else if unit.target.for_host() { + // Proc macro / plugin should never have panic set. + UnitFor::new_compiler() + } else { + UnitFor::new_normal() + }; + deps_of(unit, &mut state, unit_for)?; + } + + if !state.waiting_on_download.is_empty() { + state.finish_some_downloads()?; + state.deps.clear(); + } else { + break; + } + } + trace!("ALL UNIT DEPENDENCIES {:#?}", state.deps); + + connect_run_custom_build_deps(&mut state); + + Ok(()) +} + +fn deps_of<'a, 'cfg, 'tmp>( + unit: &Unit<'a>, + state: &mut State<'a, 'cfg, 'tmp>, + unit_for: UnitFor, +) -> CargoResult<()> { + // Currently the `deps` map does not include `unit_for`. This should + // be safe for now. `TestDependency` only exists to clear the `panic` + // flag, and you'll never ask for a `unit` with `panic` set as a + // `TestDependency`. `CustomBuild` should also be fine since if the + // requested unit's settings are the same as `Any`, `CustomBuild` can't + // affect anything else in the hierarchy. + if !state.deps.contains_key(unit) { + let unit_deps = compute_deps(unit, state, unit_for)?; + let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect(); + state.deps.insert(*unit, to_insert); + for (unit, unit_for) in unit_deps { + deps_of(&unit, state, unit_for)?; + } + } + Ok(()) +} + +/// For a package, returns all targets that are registered as dependencies +/// for that package. +/// This returns a `Vec` of `(Unit, UnitFor)` pairs. The `UnitFor` +/// is the profile type that should be used for dependencies of the unit. +fn compute_deps<'a, 'cfg, 'tmp>( + unit: &Unit<'a>, + state: &mut State<'a, 'cfg, 'tmp>, + unit_for: UnitFor, +) -> CargoResult, UnitFor)>> { + if unit.mode.is_run_custom_build() { + return compute_deps_custom_build(unit, state.bcx); + } else if unit.mode.is_doc() && !unit.mode.is_any_test() { + // Note: this does not include doc test. 
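`deps_of` above records each unit's dependency list in the map before recursing, so a unit reachable along several paths is expanded only once. The same memoized-traversal pattern over a plain integer graph (illustrative types, not Cargo's); `compute_deps_doc` continues below:

```rust
use std::collections::HashMap;

// `graph` maps a node to its direct dependencies.
fn deps_of(node: u32, graph: &HashMap<u32, Vec<u32>>, memo: &mut HashMap<u32, Vec<u32>>) {
    if !memo.contains_key(&node) {
        let deps = graph.get(&node).cloned().unwrap_or_default();
        memo.insert(node, deps.clone()); // record before recursing
        for dep in deps {
            deps_of(dep, graph, memo);
        }
    }
}

fn main() {
    let graph: HashMap<u32, Vec<u32>> = vec![(1, vec![2, 3]), (2, vec![3]), (3, vec![])]
        .into_iter()
        .collect();
    let mut memo = HashMap::new();
    deps_of(1, &graph, &mut memo);
    assert_eq!(memo.len(), 3); // node 3 expanded once despite two paths to it
}
```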
+        return compute_deps_doc(unit, state);
+    }
+
+    let bcx = state.bcx;
+    let id = unit.pkg.package_id();
+    let deps = bcx.resolve.deps(id).filter(|&(_id, deps)| {
+        assert!(!deps.is_empty());
+        deps.iter().any(|dep| {
+            // If this target is a build command, then we only want build
+            // dependencies, otherwise we want everything *other than* build
+            // dependencies.
+            if unit.target.is_custom_build() != dep.is_build() {
+                return false;
+            }
+
+            // If this dependency is **not** a transitive dependency, then it
+            // only applies to test/example targets.
+            if !dep.is_transitive()
+                && !unit.target.is_test()
+                && !unit.target.is_example()
+                && !unit.mode.is_any_test()
+            {
+                return false;
+            }
+
+            // If this dependency is only available for certain platforms,
+            // make sure we're only enabling it for that platform.
+            if !bcx.dep_platform_activated(dep, unit.kind) {
+                return false;
+            }
+
+            // If the dependency is optional, then we're only activating it
+            // if the corresponding feature was activated.
+            if dep.is_optional() && !bcx.resolve.features(id).contains(&*dep.name_in_toml()) {
+                return false;
+            }
+
+            // If we've gotten past all that, then this dependency is
+            // actually used!
+            true
+        })
+    });
+
+    let mut ret = Vec::new();
+    for (id, _) in deps {
+        let pkg = match state.get(id)? {
+            Some(pkg) => pkg,
+            None => continue,
+        };
+        let lib = match pkg.targets().iter().find(|t| t.is_lib()) {
+            Some(t) => t,
+            None => continue,
+        };
+        let mode = check_or_build_mode(unit.mode, lib);
+        let dep_unit_for = unit_for.with_for_host(lib.for_host());
+
+        if bcx.config.cli_unstable().dual_proc_macros
+            && lib.proc_macro()
+            && unit.kind == Kind::Target
+        {
+            let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Target, mode);
+            ret.push((unit, dep_unit_for));
+            let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Host, mode);
+            ret.push((unit, dep_unit_for));
+        } else {
+            let unit = new_unit(bcx, pkg, lib, dep_unit_for, unit.kind.for_target(lib), mode);
+            ret.push((unit, dep_unit_for));
+        }
+    }
+
+    // If this target is a build script, then what we've collected so far is
+    // all we need. If this isn't a build script, then it depends on the
+    // build script if there is one.
+    if unit.target.is_custom_build() {
+        return Ok(ret);
+    }
+    ret.extend(dep_build_script(unit, bcx));
+
+    // If this target is a binary, test, example, etc, then it depends on
+    // the library of the same package. The call to `resolve.deps` above
+    // didn't include `pkg` in the return values, so we need to special-case
+    // it here and see if we need to push `(pkg, pkg_lib_target)`.
+    if unit.target.is_lib() && unit.mode != CompileMode::Doctest {
+        return Ok(ret);
+    }
+    ret.extend(maybe_lib(unit, bcx, unit_for));
+
+    // If any integration tests/benches are being run, make sure that
+    // binaries are built as well.
+    if !unit.mode.is_check()
+        && unit.mode.is_any_test()
+        && (unit.target.is_test() || unit.target.is_bench())
+    {
+        ret.extend(
+            unit.pkg
+                .targets()
+                .iter()
+                .filter(|t| {
+                    let no_required_features = Vec::new();
+
+                    t.is_bin() &&
+                        // Skip binaries with required features that have not been selected.
+                        t.required_features().unwrap_or(&no_required_features).iter().all(|f| {
+                            bcx.resolve.features(id).contains(f)
+                        })
+                })
+                .map(|t| {
+                    (
+                        new_unit(
+                            bcx,
+                            unit.pkg,
+                            t,
+                            UnitFor::new_normal(),
+                            unit.kind.for_target(t),
+                            CompileMode::Build,
+                        ),
+                        UnitFor::new_normal(),
+                    )
+                }),
+        );
+    }
+
+    Ok(ret)
+}
+
+/// Returns the dependencies needed to run a build script.
+///
+/// The `unit` provided must represent an execution of a build script, and
+/// the returned set of units must all be run before `unit` is run.
+fn compute_deps_custom_build<'a, 'cfg>(
+    unit: &Unit<'a>,
+    bcx: &BuildContext<'a, 'cfg>,
+) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
+    // When not overridden, the dependencies to run a build script are:
+    //
+    // 1. Compiling the build script itself.
+    // 2. For each immediate dependency of our package which has a `links`
+    //    key, the execution of that build script.
+    //
+    // We don't have a great way of handling (2) here right now, so this is
+    // deferred until after the graph of all unit dependencies has been
+    // constructed.
+    let unit = new_unit(
+        bcx,
+        unit.pkg,
+        unit.target,
+        UnitFor::new_build(),
+        // Build scripts are always compiled for the host.
+        Kind::Host,
+        CompileMode::Build,
+    );
+    // All dependencies of this unit should use profiles for custom
+    // builds.
+    Ok(vec![(unit, UnitFor::new_build())])
+}
+
+/// Returns the dependencies necessary to document a package.
+fn compute_deps_doc<'a, 'cfg, 'tmp>(
+    unit: &Unit<'a>,
+    state: &mut State<'a, 'cfg, 'tmp>,
+) -> CargoResult<Vec<(Unit<'a>, UnitFor)>> {
+    let bcx = state.bcx;
+    let deps = bcx
+        .resolve
+        .deps(unit.pkg.package_id())
+        .filter(|&(_id, deps)| {
+            deps.iter().any(|dep| match dep.kind() {
+                DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind),
+                _ => false,
+            })
+        });
+
+    // To document a library, we depend on dependencies actually being
+    // built. If we're documenting *all* libraries, then we also depend on
+    // the documentation of the library being built.
+    let mut ret = Vec::new();
+    for (id, _deps) in deps {
+        let dep = match state.get(id)? {
+            Some(dep) => dep,
+            None => continue,
+        };
+        let lib = match dep.targets().iter().find(|t| t.is_lib()) {
+            Some(lib) => lib,
+            None => continue,
+        };
+        // Rustdoc only needs rmeta files for regular dependencies.
+        // However, for plugins/proc macros, deps should be built like normal.
+        let mode = check_or_build_mode(unit.mode, lib);
+        let dep_unit_for = UnitFor::new_normal().with_for_host(lib.for_host());
+        let lib_unit = new_unit(bcx, dep, lib, dep_unit_for, unit.kind.for_target(lib), mode);
+        ret.push((lib_unit, dep_unit_for));
+        if let CompileMode::Doc { deps: true } = unit.mode {
+            // Document this lib as well.
+            let doc_unit = new_unit(
+                bcx,
+                dep,
+                lib,
+                dep_unit_for,
+                unit.kind.for_target(lib),
+                unit.mode,
+            );
+            ret.push((doc_unit, dep_unit_for));
+        }
+    }
+
+    // Be sure to build/run the build script for documented libraries.
+    ret.extend(dep_build_script(unit, bcx));
+
+    // If we document a binary, we need the library available.
+    if unit.target.is_bin() {
+        ret.extend(maybe_lib(unit, bcx, UnitFor::new_normal()));
+    }
+    Ok(ret)
+}
+
+fn maybe_lib<'a>(
+    unit: &Unit<'a>,
+    bcx: &BuildContext<'_, '_>,
+    unit_for: UnitFor,
+) -> Option<(Unit<'a>, UnitFor)> {
+    unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| {
+        let mode = check_or_build_mode(unit.mode, t);
+        let unit = new_unit(bcx, unit.pkg, t, unit_for, unit.kind.for_target(t), mode);
+        (unit, unit_for)
+    })
+}
+
+/// If a build script is scheduled to be run for the package specified by
+/// `unit`, this function will return the unit to run that build script.
+///
+/// Overriding a build script simply means that the running of the build
+/// script itself doesn't have any dependencies, so even in that case a unit
+/// of work is still returned. `None` is only returned if the package has no
+/// build script.
+fn dep_build_script<'a>(
+    unit: &Unit<'a>,
+    bcx: &BuildContext<'_, '_>,
+) -> Option<(Unit<'a>, UnitFor)> {
+    unit.pkg
+        .targets()
+        .iter()
+        .find(|t| t.is_custom_build())
+        .map(|t| {
+            // The profile stored in the Unit is the profile for the thing
+            // the custom build script is running for.
+            (
+                Unit {
+                    pkg: unit.pkg,
+                    target: t,
+                    profile: bcx.profiles.get_profile_run_custom_build(&unit.profile),
+                    kind: unit.kind,
+                    mode: CompileMode::RunCustomBuild,
+                },
+                UnitFor::new_build(),
+            )
+        })
+}
+
+/// Chooses the correct mode for dependencies.
+fn check_or_build_mode(mode: CompileMode, target: &Target) -> CompileMode {
+    match mode {
+        CompileMode::Check { .. } | CompileMode::Doc { .. } => {
+            if target.for_host() {
+                // Plugin and proc macro targets should be compiled like
+                // normal.
+                CompileMode::Build
+            } else {
+                // Regular dependencies should not be checked with --test.
+                // Regular dependencies of doc targets should emit rmeta only.
+                CompileMode::Check { test: false }
+            }
+        }
+        _ => CompileMode::Build,
+    }
+}
+
+fn new_unit<'a>(
+    bcx: &BuildContext<'_, '_>,
+    pkg: &'a Package,
+    target: &'a Target,
+    unit_for: UnitFor,
+    kind: Kind,
+    mode: CompileMode,
+) -> Unit<'a> {
+    let profile = bcx.profiles.get_profile(
+        pkg.package_id(),
+        bcx.ws.is_member(pkg),
+        unit_for,
+        mode,
+        bcx.build_config.release,
+    );
+    Unit {
+        pkg,
+        target,
+        profile,
+        kind,
+        mode,
+    }
+}
+
+/// Fills in missing dependencies for units of the `RunCustomBuild` kind.
+///
+/// As mentioned above in `compute_deps_custom_build`, each build script
+/// execution has two dependencies. The first is compiling the build script
+/// itself (already added), and the second is the execution of the build
+/// script of every `links`-declaring crate that the build script's package
+/// depends on (a bit confusing, eh?).
+///
+/// Here we take the entire `deps` map and add more dependencies from the
+/// execution of one build script to the execution of another build script.
+fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) {
+    let mut new_deps = Vec::new();
+
+    {
+        // First up, build a reverse dependency map. This is a mapping of all
+        // `RunCustomBuild` known steps to the unit which depends on them. For
+        // example a library might depend on a build script, so this map will
+        // have the build script as the key and the library would be in the
+        // value's set.
+        let mut reverse_deps = HashMap::new();
+        for (unit, deps) in state.deps.iter() {
+            for dep in deps {
+                if dep.mode == CompileMode::RunCustomBuild {
+                    reverse_deps
+                        .entry(dep)
+                        .or_insert_with(HashSet::new)
+                        .insert(unit);
+                }
+            }
+        }
+
+        // Next, we take a look at all build script executions listed in the
+        // dependency map. Our job here is to take everything that depends on
+        // this build script (from our reverse map above) and look at the other
+        // package dependencies of these parents.
+        //
+        // If we depend on a linkable target and the build script mentions
+        // `links`, then we depend on that package's build script! Here we use
+        // `dep_build_script` to manufacture an appropriate build script unit to
+        // depend on.
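+        //
+        // As a concrete (hypothetical) example: if `bar` has a build script,
+        // `bar` depends on `foo`'s library, and `foo` declares `links = "z"`,
+        // then `bar`'s build script execution gains a dependency on `foo`'s
+        // build script execution.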
+        for unit in state
+            .deps
+            .keys()
+            .filter(|k| k.mode == CompileMode::RunCustomBuild)
+        {
+            let reverse_deps = match reverse_deps.get(unit) {
+                Some(set) => set,
+                None => continue,
+            };
+
+            let to_add = reverse_deps
+                .iter()
+                .flat_map(|reverse_dep| state.deps[reverse_dep].iter())
+                .filter(|other| {
+                    other.pkg != unit.pkg
+                        && other.target.linkable()
+                        && other.pkg.manifest().links().is_some()
+                })
+                .filter_map(|other| dep_build_script(other, state.bcx).map(|p| p.0))
+                .collect::<Vec<_>>();
+
+            if !to_add.is_empty() {
+                new_deps.push((*unit, to_add));
+            }
+        }
+    }
+
+    // And finally, add in all the missing dependencies!
+    for (unit, new_deps) in new_deps {
+        state.deps.get_mut(&unit).unwrap().extend(new_deps);
+    }
+}
+
+impl<'a, 'cfg, 'tmp> State<'a, 'cfg, 'tmp> {
+    fn get(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> {
+        let mut pkgs = self.pkgs.borrow_mut();
+        if let Some(pkg) = pkgs.get(&id) {
+            return Ok(Some(pkg));
+        }
+        if !self.waiting_on_download.insert(id) {
+            return Ok(None);
+        }
+        if let Some(pkg) = self.downloads.start(id)? {
+            pkgs.insert(id, pkg);
+            self.waiting_on_download.remove(&id);
+            return Ok(Some(pkg));
+        }
+        Ok(None)
+    }
+
+    /// Completes at least one download, maybe waiting for more to complete.
+    ///
+    /// This function will block the current thread waiting for at least one
+    /// crate to finish downloading. The function may continue to download more
+    /// crates if it looks like there's a long enough queue of crates to keep
+    /// downloading. When only a handful of packages remain, this function
+    /// returns, and it's hoped that by returning we'll be able to push more
+    /// packages to download into the queue.
+    fn finish_some_downloads(&mut self) -> CargoResult<()> {
+        assert!(self.downloads.remaining() > 0);
+        loop {
+            let pkg = self.downloads.wait()?;
+            self.waiting_on_download.remove(&pkg.package_id());
+            self.pkgs.borrow_mut().insert(pkg.package_id(), pkg);
+
+            // Arbitrarily choose that 5 or more packages downloading
+            // concurrently is a good enough number to "fill the network pipe".
+            // If we have fewer than this, let's recompute the whole unit
+            // dependency graph again and try to find some more packages to
+            // download.
+            if self.downloads.remaining() < 5 {
+                break;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/src/cargo/core/compiler/custom_build.rs b/src/cargo/core/compiler/custom_build.rs
new file mode 100644
index 000000000..a0046b732
--- /dev/null
+++ b/src/cargo/core/compiler/custom_build.rs
@@ -0,0 +1,694 @@
+use std::collections::hash_map::{Entry, HashMap};
+use std::collections::{BTreeSet, HashSet};
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::{Arc, Mutex};
+
+use crate::core::PackageId;
+use crate::util::errors::{CargoResult, CargoResultExt};
+use crate::util::machine_message;
+use crate::util::{self, internal, paths, profile};
+use crate::util::{Cfg, Freshness};
+
+use super::job::Work;
+use super::{fingerprint, Context, Kind, TargetConfig, Unit};
+
+/// Contains the parsed output of a custom build script.
+#[derive(Clone, Debug, Hash)]
+pub struct BuildOutput {
+    /// Paths to pass to rustc with the `-L` flag.
+    pub library_paths: Vec<PathBuf>,
+    /// Names and link kinds of libraries, suitable for the `-l` flag.
+    pub library_links: Vec<String>,
+    /// Various `--cfg` flags to pass to the compiler.
+    pub cfgs: Vec<String>,
+    /// Additional environment variables to run the compiler with.
+    pub env: Vec<(String, String)>,
+    /// Metadata to pass to the immediate dependents.
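+    /// (Unrecognized `cargo:KEY=VALUE` lines from the script end up here; a
+    /// dependent's build script then sees each entry as a
+    /// `DEP_<links>_<KEY>` environment variable, per the `DEP_` handling
+    /// below.)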
+    pub metadata: Vec<(String, String)>,
+    /// Paths to trigger a rerun of this build script.
+    /// May be absolute or relative paths (relative to package root).
+    pub rerun_if_changed: Vec<PathBuf>,
+    /// Environment variables which, when changed, will cause a rebuild.
+    pub rerun_if_env_changed: Vec<String>,
+    /// Warnings generated by this build.
+    pub warnings: Vec<String>,
+}
+
+/// Map of packages to build info.
+pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
+
+/// Build info and overrides.
+pub struct BuildState {
+    pub outputs: Mutex<BuildMap>,
+    overrides: HashMap<(String, Kind), BuildOutput>,
+}
+
+#[derive(Default)]
+pub struct BuildScripts {
+    // Cargo will use this `to_link` vector to add `-L` flags to compiles as we
+    // propagate them upwards towards the final build. Note, however, that we
+    // need to preserve the ordering of `to_link` to be topologically sorted.
+    // This will ensure that build scripts which print their paths properly will
+    // correctly pick up the files they generated (if there are duplicates
+    // elsewhere).
+    //
+    // To preserve this ordering, the (id, kind) is stored in two places, once
+    // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
+    // this as we're building interactively below to ensure that the memory
+    // usage here doesn't blow up too much.
+    //
+    // For more information, see #2354.
+    pub to_link: Vec<(PackageId, Kind)>,
+    seen_to_link: HashSet<(PackageId, Kind)>,
+    pub plugins: BTreeSet<PackageId>,
+}
+
+pub struct BuildDeps {
+    pub build_script_output: PathBuf,
+    pub rerun_if_changed: Vec<PathBuf>,
+    pub rerun_if_env_changed: Vec<String>,
+}
+
+/// Prepares a `Work` that executes the target as a custom build script.
+///
+/// The `req` given is the requirement which this run of the build script will
+/// prepare work for. If the requirement is specified as both the target and the
+/// host platforms, it is assumed that the two are equal and the build script is
+/// only run once (not twice).
+pub fn prepare<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<(Work, Work, Freshness)> {
+    let _p = profile::start(format!(
+        "build script prepare: {}/{}",
+        unit.pkg,
+        unit.target.name()
+    ));
+
+    let key = (unit.pkg.package_id(), unit.kind);
+    let overridden = cx.build_script_overridden.contains(&key);
+    let (work_dirty, work_fresh) = if overridden {
+        (Work::noop(), Work::noop())
+    } else {
+        build_work(cx, unit)?
+    };
+
+    if cx.bcx.build_config.build_plan {
+        Ok((work_dirty, work_fresh, Freshness::Dirty))
+    } else {
+        // Now that we've prepped our work, build the work needed to manage the
+        // fingerprint, and then start returning that upwards.
+        let (freshness, dirty, fresh) = fingerprint::prepare_build_cmd(cx, unit)?;
+
+        Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness))
+    }
+}
+
+fn emit_build_output(output: &BuildOutput, package_id: PackageId) {
+    let library_paths = output
+        .library_paths
+        .iter()
+        .map(|l| l.display().to_string())
+        .collect::<Vec<_>>();
+
+    machine_message::emit(&machine_message::BuildScript {
+        package_id,
+        linked_libs: &output.library_links,
+        linked_paths: &library_paths,
+        cfgs: &output.cfgs,
+        env: &output.env,
+    });
+}
+
+fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<(Work, Work)> {
+    assert!(unit.mode.is_run_custom_build());
+    let bcx = &cx.bcx;
+    let dependencies = cx.dep_targets(unit);
+    let build_script_unit = dependencies
+        .iter()
+        .find(|d| !d.mode.is_run_custom_build() && d.target.is_custom_build())
+        .expect("running a script not depending on an actual script");
+    let script_dir = cx.files().build_script_dir(build_script_unit);
+    let script_out_dir = cx.files().build_script_out_dir(unit);
+    let build_plan = bcx.build_config.build_plan;
+    let invocation_name = unit.buildkey();
+
+    if let Some(deps) = unit.pkg.manifest().metabuild() {
+        prepare_metabuild(cx, build_script_unit, deps)?;
+    }
+
+    // Building the command to execute.
+    let to_exec = script_dir.join(unit.target.name());
+
+    // Start preparing the process to execute, starting out with some
+    // environment variables. Note that the profile-related environment
+    // variables are not set from the build script's own profile but rather
+    // from the package's library profile.
+    // NOTE: if you add any profile flags, be sure to update
+    // `Profiles::get_profile_run_custom_build` so that those flags get
+    // carried over.
+    let to_exec = to_exec.into_os_string();
+    let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?;
+    let debug = unit.profile.debuginfo.unwrap_or(0) != 0;
+    cmd.env("OUT_DIR", &script_out_dir)
+        .env("CARGO_MANIFEST_DIR", unit.pkg.root())
+        .env("NUM_JOBS", &bcx.jobs().to_string())
+        .env(
+            "TARGET",
+            &match unit.kind {
+                Kind::Host => &bcx.host_triple(),
+                Kind::Target => bcx.target_triple(),
+            },
+        )
+        .env("DEBUG", debug.to_string())
+        .env("OPT_LEVEL", &unit.profile.opt_level.to_string())
+        .env(
+            "PROFILE",
+            if bcx.build_config.release {
+                "release"
+            } else {
+                "debug"
+            },
+        )
+        .env("HOST", &bcx.host_triple())
+        .env("RUSTC", &bcx.rustc.path)
+        .env("RUSTDOC", &*bcx.config.rustdoc()?)
+        .inherit_jobserver(&cx.jobserver);
+
+    if let Some(ref linker) = bcx.target_config.linker {
+        cmd.env("RUSTC_LINKER", linker);
+    }
+
+    if let Some(links) = unit.pkg.manifest().links() {
+        cmd.env("CARGO_MANIFEST_LINKS", links);
+    }
+
+    // Be sure to pass along all enabled features for this package; this is the
+    // last piece of statically known information that we have.
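+    // For example, an enabled feature `foo-bar` reaches the build script as
+    // `CARGO_FEATURE_FOO_BAR=1` (`super::envify` uppercases names and maps
+    // `-` to `_`).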
+    for feat in bcx.resolve.features(unit.pkg.package_id()).iter() {
+        cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1");
+    }
+
+    let mut cfg_map = HashMap::new();
+    for cfg in bcx.cfg(unit.kind) {
+        match *cfg {
+            Cfg::Name(ref n) => {
+                cfg_map.insert(n.clone(), None);
+            }
+            Cfg::KeyPair(ref k, ref v) => {
+                if let Some(ref mut values) =
+                    *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new()))
+                {
+                    values.push(v.clone())
+                }
+            }
+        }
+    }
+    for (k, v) in cfg_map {
+        let k = format!("CARGO_CFG_{}", super::envify(&k));
+        match v {
+            Some(list) => {
+                cmd.env(&k, list.join(","));
+            }
+            None => {
+                cmd.env(&k, "");
+            }
+        }
+    }
+
+    // Gather the set of native dependencies that this package has along with
+    // some other variables to close over.
+    //
+    // This information will be used at build-time later on to figure out which
+    // sorts of variables need to be discovered at that time.
+    let lib_deps = {
+        dependencies
+            .iter()
+            .filter_map(|unit| {
+                if unit.mode.is_run_custom_build() {
+                    Some((
+                        unit.pkg.manifest().links().unwrap().to_string(),
+                        unit.pkg.package_id(),
+                    ))
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>()
+    };
+    let pkg_name = unit.pkg.to_string();
+    let build_state = Arc::clone(&cx.build_state);
+    let id = unit.pkg.package_id();
+    let (output_file, err_file, root_output_file) = {
+        let build_output_parent = script_out_dir.parent().unwrap();
+        let output_file = build_output_parent.join("output");
+        let err_file = build_output_parent.join("stderr");
+        let root_output_file = build_output_parent.join("root-output");
+        (output_file, err_file, root_output_file)
+    };
+    let host_target_root = cx.files().target_root().to_path_buf();
+    let all = (
+        id,
+        pkg_name.clone(),
+        Arc::clone(&build_state),
+        output_file.clone(),
+        script_out_dir.clone(),
+    );
+    let build_scripts = super::load_build_deps(cx, unit);
+    let kind = unit.kind;
+    let json_messages = bcx.build_config.json_messages();
+    let extra_verbose = bcx.config.extra_verbose();
+
+    // Check to see if the build script has already run, and if it has, keep
+    // track of whether it has told us about some explicit dependencies.
+    let prev_script_out_dir = paths::read_bytes(&root_output_file)
+        .and_then(|bytes| util::bytes2path(&bytes))
+        .unwrap_or_else(|_| script_out_dir.clone());
+
+    let prev_output = BuildOutput::parse_file(
+        &output_file,
+        &pkg_name,
+        &prev_script_out_dir,
+        &script_out_dir,
+    )
+    .ok();
+    let deps = BuildDeps::new(&output_file, prev_output.as_ref());
+    cx.build_explicit_deps.insert(*unit, deps);
+
+    fs::create_dir_all(&script_dir)?;
+    fs::create_dir_all(&script_out_dir)?;
+
+    // Prepare the unit of "dirty work" which will actually run the custom build
+    // command.
+    //
+    // Note that this has to do some extra work just before running the command
+    // to determine extra environment variables and such.
+    let dirty = Work::new(move |state| {
+        // Make sure that OUT_DIR exists.
+        //
+        // If we have an old build directory, then just move it into place,
+        // otherwise create it!
+        if fs::metadata(&script_out_dir).is_err() {
+            fs::create_dir(&script_out_dir).chain_err(|| {
+                internal(
+                    "failed to create script output directory for \
+                     build command",
+                )
+            })?;
+        }
+
+        // For all our native lib dependencies, pick up their metadata to pass
+        // along to this custom build command. We're also careful to augment our
+        // dynamic library search path in case the build script depended on any
+        // native dynamic libraries.
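+        //
+        // For example (hypothetical): if this package depends on a crate with
+        // `links = "z"` whose build script printed `cargo:include=/usr/include`,
+        // then this build script runs with `DEP_Z_INCLUDE=/usr/include` set.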
+        if !build_plan {
+            let build_state = build_state.outputs.lock().unwrap();
+            for (name, id) in lib_deps {
+                let key = (id, kind);
+                let state = build_state.get(&key).ok_or_else(|| {
+                    internal(format!(
+                        "failed to locate build state for env \
+                         vars: {}/{:?}",
+                        id, kind
+                    ))
+                })?;
+                let data = &state.metadata;
+                for &(ref key, ref value) in data.iter() {
+                    cmd.env(
+                        &format!("DEP_{}_{}", super::envify(&name), super::envify(key)),
+                        value,
+                    );
+                }
+            }
+            if let Some(build_scripts) = build_scripts {
+                super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &host_target_root)?;
+            }
+        }
+
+        // And now finally, run the build command itself!
+        if build_plan {
+            state.build_plan(invocation_name, cmd.clone(), Arc::new(Vec::new()));
+        } else {
+            state.running(&cmd);
+            let timestamp = paths::get_current_filesystem_time(&output_file)?;
+            let output = if extra_verbose {
+                let prefix = format!("[{} {}] ", id.name(), id.version());
+                state.capture_output(&cmd, Some(prefix), true)
+            } else {
+                cmd.exec_with_output()
+            };
+            let output = output.map_err(|e| {
+                failure::format_err!(
+                    "failed to run custom build command for `{}`\n{}",
+                    pkg_name,
+                    e
+                )
+            })?;
+
+            // After the build command has finished running, we need to be sure to
+            // remember all of its output so we can later discover precisely what it
+            // was, even if we don't run the build command again (due to freshness).
+            //
+            // This is also the location where we provide feedback into the build
+            // state informing what variables were discovered via our script as
+            // well.
+            paths::write(&output_file, &output.stdout)?;
+            filetime::set_file_times(output_file, timestamp, timestamp)?;
+            paths::write(&err_file, &output.stderr)?;
+            paths::write(&root_output_file, util::path2bytes(&script_out_dir)?)?;
+            let parsed_output =
+                BuildOutput::parse(&output.stdout, &pkg_name, &script_out_dir, &script_out_dir)?;
+
+            if json_messages {
+                emit_build_output(&parsed_output, id);
+            }
+            build_state.insert(id, kind, parsed_output);
+        }
+        Ok(())
+    });
+
+    // Now that we've prepared our work-to-do, we need to prepare the fresh
+    // work itself, which runs when we end up discarding the dirty work
+    // calculated above.
+    let fresh = Work::new(move |_tx| {
+        let (id, pkg_name, build_state, output_file, script_out_dir) = all;
+        let output = match prev_output {
+            Some(output) => output,
+            None => BuildOutput::parse_file(
+                &output_file,
+                &pkg_name,
+                &prev_script_out_dir,
+                &script_out_dir,
+            )?,
+        };
+
+        if json_messages {
+            emit_build_output(&output, id);
+        }
+
+        build_state.insert(id, kind, output);
+        Ok(())
+    });
+
+    Ok((dirty, fresh))
+}
+
+impl BuildState {
+    pub fn new(host_config: &TargetConfig, target_config: &TargetConfig) -> BuildState {
+        let mut overrides = HashMap::new();
+        let i1 = host_config.overrides.iter().map(|p| (p, Kind::Host));
+        let i2 = target_config.overrides.iter().map(|p| (p, Kind::Target));
+        for ((name, output), kind) in i1.chain(i2) {
+            overrides.insert((name.clone(), kind), output.clone());
+        }
+        BuildState {
+            outputs: Mutex::new(HashMap::new()),
+            overrides,
+        }
+    }
+
+    fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
+        self.outputs.lock().unwrap().insert((id, kind), output);
+    }
+}
+
+impl BuildOutput {
+    pub fn parse_file(
+        path: &Path,
+        pkg_name: &str,
+        script_out_dir_when_generated: &Path,
+        script_out_dir: &Path,
+    ) -> CargoResult<BuildOutput> {
+        let contents = paths::read_bytes(path)?;
+        BuildOutput::parse(
+            &contents,
+            pkg_name,
+            script_out_dir_when_generated,
+            script_out_dir,
+        )
+    }
+
+    // Parses the output of a script.
+    // The `pkg_name` is used for error messages.
+    pub fn parse(
+        input: &[u8],
+        pkg_name: &str,
+        script_out_dir_when_generated: &Path,
+        script_out_dir: &Path,
+    ) -> CargoResult<BuildOutput> {
+        let mut library_paths = Vec::new();
+        let mut library_links = Vec::new();
+        let mut cfgs = Vec::new();
+        let mut env = Vec::new();
+        let mut metadata = Vec::new();
+        let mut rerun_if_changed = Vec::new();
+        let mut rerun_if_env_changed = Vec::new();
+        let mut warnings = Vec::new();
+        let whence = format!("build script of `{}`", pkg_name);
+
+        for line in input.split(|b| *b == b'\n') {
+            let line = match str::from_utf8(line) {
+                Ok(line) => line.trim(),
+                Err(..) => continue,
+            };
+            let mut iter = line.splitn(2, ':');
+            if iter.next() != Some("cargo") {
+                // Skip this line since it doesn't start with "cargo:".
+                continue;
+            }
+            let data = match iter.next() {
+                Some(val) => val,
+                None => continue,
+            };
+
+            // Getting the `key=value` part of the line.
+            let mut iter = data.splitn(2, '=');
+            let key = iter.next();
+            let value = iter.next();
+            let (key, value) = match (key, value) {
+                (Some(a), Some(b)) => (a, b.trim_end()),
+                // Line started with `cargo:` but didn't match `key=value`.
+                _ => failure::bail!("Wrong output in {}: `{}`", whence, line),
+            };
+
+            // This will rewrite paths if the target directory has been moved.
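+            // For example (hypothetical paths): a `cargo:rustc-link-search`
+            // value recorded while the out dir lived under `/old/target` is
+            // rewritten here to the current `/new/target` location.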
+            let value = value.replace(
+                script_out_dir_when_generated.to_str().unwrap(),
+                script_out_dir.to_str().unwrap(),
+            );
+
+            match key {
+                "rustc-flags" => {
+                    let (paths, links) = BuildOutput::parse_rustc_flags(&value, &whence)?;
+                    library_links.extend(links.into_iter());
+                    library_paths.extend(paths.into_iter());
+                }
+                "rustc-link-lib" => library_links.push(value.to_string()),
+                "rustc-link-search" => library_paths.push(PathBuf::from(value)),
+                "rustc-cfg" => cfgs.push(value.to_string()),
+                "rustc-env" => env.push(BuildOutput::parse_rustc_env(&value, &whence)?),
+                "warning" => warnings.push(value.to_string()),
+                "rerun-if-changed" => rerun_if_changed.push(PathBuf::from(value)),
+                "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()),
+                _ => metadata.push((key.to_string(), value.to_string())),
+            }
+        }
+
+        Ok(BuildOutput {
+            library_paths,
+            library_links,
+            cfgs,
+            env,
+            metadata,
+            rerun_if_changed,
+            rerun_if_env_changed,
+            warnings,
+        })
+    }
+
+    pub fn parse_rustc_flags(
+        value: &str,
+        whence: &str,
+    ) -> CargoResult<(Vec<PathBuf>, Vec<String>)> {
+        let value = value.trim();
+        let mut flags_iter = value
+            .split(|c: char| c.is_whitespace())
+            .filter(|w| w.chars().any(|c| !c.is_whitespace()));
+        let (mut library_paths, mut library_links) = (Vec::new(), Vec::new());
+        while let Some(flag) = flags_iter.next() {
+            if flag != "-l" && flag != "-L" {
+                failure::bail!(
+                    "Only `-l` and `-L` flags are allowed in {}: `{}`",
+                    whence,
+                    value
+                )
+            }
+            let value = match flags_iter.next() {
+                Some(v) => v,
+                None => failure::bail!(
+                    "Flag in rustc-flags has no value in {}: `{}`",
+                    whence,
+                    value
+                ),
+            };
+            match flag {
+                "-l" => library_links.push(value.to_string()),
+                "-L" => library_paths.push(PathBuf::from(value)),
+
+                // This was already checked above.
+                _ => failure::bail!("only -l and -L flags are allowed"),
+            };
+        }
+        Ok((library_paths, library_links))
+    }
+
+    pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> {
+        let mut iter = value.splitn(2, '=');
+        let name = iter.next();
+        let val = iter.next();
+        match (name, val) {
+            (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())),
+            _ => failure::bail!("Variable rustc-env has no value in {}: {}", whence, value),
+        }
+    }
+}
+
+fn prepare_metabuild<'a, 'cfg>(
+    cx: &Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+    deps: &[String],
+) -> CargoResult<()> {
+    let mut output = Vec::new();
+    let available_deps = cx.dep_targets(unit);
+    // Filter out optional dependencies, and look up the actual lib name.
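+    // The file generated below looks roughly like this (assuming a single
+    // metabuild dependency `mb`):
+    //
+    //     use mb;
+    //     fn main() {
+    //         mb::metabuild();
+    //     }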
+    let meta_deps: Vec<_> = deps
+        .iter()
+        .filter_map(|name| {
+            available_deps
+                .iter()
+                .find(|u| u.pkg.name().as_str() == name.as_str())
+                .map(|dep| dep.target.crate_name())
+        })
+        .collect();
+    for dep in &meta_deps {
+        output.push(format!("use {};\n", dep));
+    }
+    output.push("fn main() {\n".to_string());
+    for dep in &meta_deps {
+        output.push(format!("    {}::metabuild();\n", dep));
+    }
+    output.push("}\n".to_string());
+    let output = output.join("");
+    let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir());
+    fs::create_dir_all(path.parent().unwrap())?;
+    paths::write_if_changed(path, &output)?;
+    Ok(())
+}
+
+impl BuildDeps {
+    pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps {
+        BuildDeps {
+            build_script_output: output_file.to_path_buf(),
+            rerun_if_changed: output
+                .map(|p| &p.rerun_if_changed)
+                .cloned()
+                .unwrap_or_default(),
+            rerun_if_env_changed: output
+                .map(|p| &p.rerun_if_env_changed)
+                .cloned()
+                .unwrap_or_default(),
+        }
+    }
+}
+
+/// Computes the `build_scripts` map in the `Context`, which tracks what build
+/// scripts each package depends on.
+///
+/// The global `build_scripts` map lists for all (package, kind) tuples what set
+/// of packages' build script outputs must be considered. For example, this lists
+/// all dependencies' `-L` flags which need to be propagated transitively.
+///
+/// The given set of units to this function is the initial set of
+/// targets/profiles which are being built.
+pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> {
+    let mut ret = HashMap::new();
+    for unit in units {
+        build(&mut ret, cx, unit)?;
+    }
+    cx.build_scripts
+        .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v))));
+    return Ok(());
+
+    // Recursive function to build up the map we're constructing. This function
+    // memoizes all of its return values as it goes along.
+    fn build<'a, 'b, 'cfg>(
+        out: &'a mut HashMap<Unit<'b>, BuildScripts>,
+        cx: &mut Context<'b, 'cfg>,
+        unit: &Unit<'b>,
+    ) -> CargoResult<&'a BuildScripts> {
+        // Do a quick pre-flight check to see if we've already calculated the
+        // set of dependencies.
+        if out.contains_key(unit) {
+            return Ok(&out[unit]);
+        }
+
+        {
+            let key = unit
+                .pkg
+                .manifest()
+                .links()
+                .map(|l| (l.to_string(), unit.kind));
+            let build_state = &cx.build_state;
+            if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) {
+                let key = (unit.pkg.package_id(), unit.kind);
+                cx.build_script_overridden.insert(key);
+                build_state
+                    .outputs
+                    .lock()
+                    .unwrap()
+                    .insert(key, output.clone());
+            }
+        }
+
+        let mut ret = BuildScripts::default();
+
+        if !unit.target.is_custom_build() && unit.pkg.has_custom_build() {
+            add_to_link(&mut ret, unit.pkg.package_id(), unit.kind);
+        }
+
+        // We want to invoke the compiler deterministically to be cache-friendly
+        // to rustc invocation caching schemes, so be sure to generate the same
+        // set of build script dependency orderings via sorting the targets that
+        // come out of the `Context`.
+        let mut targets = cx.dep_targets(unit);
+        targets.sort_by_key(|u| u.pkg.package_id());
+
+        for unit in targets.iter() {
+            let dep_scripts = build(out, cx, unit)?;
+
+            if unit.target.for_host() {
+                ret.plugins
+                    .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned());
+            } else if unit.target.linkable() {
+                for &(pkg, kind) in dep_scripts.to_link.iter() {
+                    add_to_link(&mut ret, pkg, kind);
+                }
+            }
+        }
+
+        match out.entry(*unit) {
+            Entry::Vacant(entry) => Ok(entry.insert(ret)),
+            Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"),
+        }
+    }
+
+    // When adding an entry to `to_link` we only actually push it on if the
+    // script hasn't seen it yet (e.g., we don't push on duplicates).
+    fn add_to_link(scripts: &mut BuildScripts, pkg: PackageId, kind: Kind) {
+        if scripts.seen_to_link.insert((pkg, kind)) {
+            scripts.to_link.push((pkg, kind));
+        }
+    }
+}
diff --git a/src/cargo/core/compiler/fingerprint.rs b/src/cargo/core/compiler/fingerprint.rs
new file mode 100644
index 000000000..4e23fb0fd
--- /dev/null
+++ b/src/cargo/core/compiler/fingerprint.rs
@@ -0,0 +1,895 @@
+use std::env;
+use std::fs;
+use std::hash::{self, Hasher};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+use std::time::SystemTime;
+
+use filetime::FileTime;
+use log::{debug, info};
+use serde::de;
+use serde::ser;
+use serde::{Deserialize, Serialize};
+
+use crate::core::{Edition, Package};
+use crate::util;
+use crate::util::errors::{CargoResult, CargoResultExt};
+use crate::util::paths;
+use crate::util::{internal, profile, Dirty, Fresh, Freshness};
+
+use super::custom_build::BuildDeps;
+use super::job::Work;
+use super::{BuildContext, Context, FileFlavor, Unit};
+
+/// A tuple result of the `prepare_foo` functions in this module.
+///
+/// The first element of the triple is whether the target in question is
+/// currently fresh or not, and the second two elements are work to perform when
+/// the target is dirty or fresh, respectively.
+///
+/// Both units of work are always generated because a fresh package may still be
+/// rebuilt if some upstream dependency changes.
+pub type Preparation = (Freshness, Work, Work);
+
+/// Prepares the necessary work for the fingerprint for a specific target.
+///
+/// When dealing with fingerprints, Cargo gets to choose what granularity
+/// "freshness" is considered at. One option is considering freshness at the
+/// package level. This means that if anything in a package changes, the entire
+/// package is rebuilt, unconditionally. This simplicity comes at a cost,
+/// however, in that test-only changes will cause libraries to be rebuilt, which
+/// is quite unfortunate!
+///
+/// The cost was deemed high enough that fingerprints are now calculated at the
+/// layer of a target rather than a package. Each target can then be kept track
+/// of separately and only rebuilt as necessary. This requires Cargo to
+/// understand what the inputs are to a target, so we drive rustc with the
+/// --dep-info flag to learn about all input files to a unit of compilation.
+///
+/// This function will calculate the fingerprint for a target and prepare the
+/// work necessary to either write the fingerprint or copy over all fresh files
+/// from the old directories to their new locations.
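+///
+/// Concretely (as implemented below), each unit gets a fingerprint file
+/// holding the hex hash, a sibling `.json` file with the full serialized
+/// `Fingerprint`, and a `dep-*` file with the translated dep-info, all under
+/// the unit's fingerprint directory.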
+pub fn prepare_target<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<Preparation> {
+    let _p = profile::start(format!(
+        "fingerprint: {} / {}",
+        unit.pkg.package_id(),
+        unit.target.name()
+    ));
+    let bcx = cx.bcx;
+    let new = cx.files().fingerprint_dir(unit);
+    let loc = new.join(&filename(cx, unit));
+
+    debug!("fingerprint at: {}", loc.display());
+
+    let fingerprint = calculate(cx, unit)?;
+    let mtime_on_use = cx.bcx.config.cli_unstable().mtime_on_use;
+    let compare = compare_old_fingerprint(&loc, &*fingerprint, mtime_on_use);
+    log_compare(unit, &compare);
+
+    // If our comparison failed (e.g., we're going to trigger a rebuild of this
+    // crate), then we also ensure the source of the crate passes all
+    // verification checks before we build it.
+    //
+    // The `Source::verify` method is intended to allow sources to execute
+    // pre-build checks to ensure that the relevant source code is all
+    // up-to-date and as expected. This is currently used primarily for
+    // directory sources, which will use this hook to perform an integrity check
+    // on all files in the source to ensure they haven't changed. If they have
+    // changed then an error is issued.
+    if compare.is_err() {
+        let source_id = unit.pkg.package_id().source_id();
+        let sources = bcx.packages.sources();
+        let source = sources
+            .get(source_id)
+            .ok_or_else(|| internal("missing package source"))?;
+        source.verify(unit.pkg.package_id())?;
+    }
+
+    let root = cx.files().out_dir(unit);
+    let missing_outputs = {
+        let t = FileTime::from_system_time(SystemTime::now());
+        if unit.mode.is_doc() {
+            !root
+                .join(unit.target.crate_name())
+                .join("index.html")
+                .exists()
+        } else {
+            match cx
+                .outputs(unit)?
+                .iter()
+                .filter(|output| output.flavor != FileFlavor::DebugInfo)
+                .find(|output| {
+                    if output.path.exists() {
+                        if mtime_on_use {
+                            // Update the mtime so other cleaners know we used it.
+                            let _ = filetime::set_file_times(&output.path, t, t);
+                        }
+                        false
+                    } else {
+                        true
+                    }
+                }) {
+                None => false,
+                Some(output) => {
+                    info!("missing output path {:?}", output.path);
+                    true
+                }
+            }
+        }
+    };
+
+    let allow_failure = bcx.extra_args_for(unit).is_some();
+    let target_root = cx.files().target_root().to_path_buf();
+    let write_fingerprint = Work::new(move |_| {
+        match fingerprint.update_local(&target_root) {
+            Ok(()) => {}
+            Err(..) if allow_failure => return Ok(()),
+            Err(e) => return Err(e),
+        }
+        write_fingerprint(&loc, &*fingerprint)
+    });
+
+    let fresh = compare.is_ok() && !missing_outputs;
+    Ok((
+        if fresh { Fresh } else { Dirty },
+        write_fingerprint,
+        Work::noop(),
+    ))
+}
+
+/// A compilation unit dependency has a fingerprint that is comprised of:
+/// * its package ID
+/// * its extern crate name
+/// * its calculated fingerprint for the dependency
+struct DepFingerprint {
+    pkg_id: String,
+    name: String,
+    fingerprint: Arc<Fingerprint>,
+}
+
+/// A fingerprint can be considered to be a "short string" representing the
+/// state of a world for a package.
+///
+/// If a fingerprint ever changes, then the package itself needs to be
+/// recompiled. Inputs to the fingerprint include source code modifications,
+/// compiler flags, compiler version, etc. This structure is not simply a
+/// `String` due to the fact that some fingerprints cannot be calculated lazily.
+///
+/// Path sources, for example, use the mtime of the corresponding dep-info file
+/// as a fingerprint (all source files must be modified *before* this mtime).
+/// This dep-info file is not generated, however, until after the crate is
+/// compiled. As a result, this structure can be thought of as a fingerprint
+/// to-be. The actual value can be calculated via `hash()`, but the operation
+/// may fail as some files may not have been generated.
+///
+/// Note that dependencies are taken into account for fingerprints because rustc
+/// requires that whenever an upstream crate is recompiled, all downstream
+/// dependents are also recompiled. This is typically tracked through
+/// `DependencyQueue`, but it also needs to be retained here because Cargo can
+/// be interrupted while executing, losing the state of the `DependencyQueue`
+/// graph.
+#[derive(Serialize, Deserialize)]
+pub struct Fingerprint {
+    rustc: u64,
+    features: String,
+    target: u64,
+    profile: u64,
+    path: u64,
+    deps: Vec<DepFingerprint>,
+    local: Vec<LocalFingerprint>,
+    #[serde(skip_serializing, skip_deserializing)]
+    memoized_hash: Mutex<Option<u64>>,
+    rustflags: Vec<String>,
+    edition: Edition,
+}
+
+impl Serialize for DepFingerprint {
+    fn serialize<S>(&self, ser: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        (&self.pkg_id, &self.name, &self.fingerprint.hash()).serialize(ser)
+    }
+}
+
+impl<'de> Deserialize<'de> for DepFingerprint {
+    fn deserialize<D>(d: D) -> Result<DepFingerprint, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        let (pkg_id, name, hash) = <(String, String, u64)>::deserialize(d)?;
+        Ok(DepFingerprint {
+            pkg_id,
+            name,
+            fingerprint: Arc::new(Fingerprint {
+                local: vec![LocalFingerprint::Precalculated(String::new())],
+                memoized_hash: Mutex::new(Some(hash)),
+                ..Fingerprint::new()
+            }),
+        })
+    }
+}
+
+#[derive(Serialize, Deserialize, Hash)]
+enum LocalFingerprint {
+    Precalculated(String),
+    MtimeBased(MtimeSlot, PathBuf),
+    EnvBased(String, Option<String>),
+}
+
+impl LocalFingerprint {
+    fn mtime(root: &Path, mtime: Option<FileTime>, path: &Path) -> LocalFingerprint {
+        let mtime = MtimeSlot(Mutex::new(mtime));
+        assert!(path.is_absolute());
+        let path = path.strip_prefix(root).unwrap_or(path);
+        LocalFingerprint::MtimeBased(mtime, path.to_path_buf())
+    }
+}
+
+struct MtimeSlot(Mutex<Option<FileTime>>);
+
+impl Fingerprint {
+    fn new() -> Fingerprint {
+        Fingerprint {
+            rustc: 0,
+            target: 0,
+            profile: 0,
+            path: 0,
+            features: String::new(),
+            deps: Vec::new(),
+            local: Vec::new(),
+            memoized_hash: Mutex::new(None),
+            edition: Edition::Edition2015,
+            rustflags: Vec::new(),
+        }
+    }
+
+    fn update_local(&self, root: &Path) -> CargoResult<()> {
+        for local in self.local.iter() {
+            match *local {
+                LocalFingerprint::MtimeBased(ref slot, ref path) => {
+                    let path = root.join(path);
+                    let mtime = paths::mtime(&path)?;
+                    *slot.0.lock().unwrap() = Some(mtime);
+                }
+                LocalFingerprint::EnvBased(..) | LocalFingerprint::Precalculated(..) => continue,
+            }
+        }
+
+        *self.memoized_hash.lock().unwrap() = None;
+        Ok(())
+    }
+
+    fn hash(&self) -> u64 {
+        if let Some(s) = *self.memoized_hash.lock().unwrap() {
+            return s;
+        }
+        let ret = util::hash_u64(self);
+        *self.memoized_hash.lock().unwrap() = Some(ret);
+        ret
+    }
+
+    fn compare(&self, old: &Fingerprint) -> CargoResult<()> {
+        if self.rustc != old.rustc {
+            failure::bail!("rust compiler has changed")
+        }
+        if self.features != old.features {
+            failure::bail!(
+                "features have changed: {} != {}",
+                self.features,
+                old.features
+            )
+        }
+        if self.target != old.target {
+            failure::bail!("target configuration has changed")
+        }
+        if self.path != old.path {
+            failure::bail!("path to the compiler has changed")
+        }
+        if self.profile != old.profile {
+            failure::bail!("profile configuration has changed")
+        }
+        if self.rustflags != old.rustflags {
+            failure::bail!("RUSTFLAGS has changed")
+        }
+        if self.local.len() != old.local.len() {
+            failure::bail!("local lens changed");
+        }
+        if self.edition != old.edition {
+            failure::bail!("edition changed")
+        }
+        for (new, old) in self.local.iter().zip(&old.local) {
+            match (new, old) {
+                (
+                    &LocalFingerprint::Precalculated(ref a),
+                    &LocalFingerprint::Precalculated(ref b),
+                ) => {
+                    if a != b {
+                        failure::bail!("precalculated components have changed: {} != {}", a, b)
+                    }
+                }
+                (
+                    &LocalFingerprint::MtimeBased(ref on_disk_mtime, ref ap),
+                    &LocalFingerprint::MtimeBased(ref previously_built_mtime, ref bp),
+                ) => {
+                    let on_disk_mtime = on_disk_mtime.0.lock().unwrap();
+                    let previously_built_mtime = previously_built_mtime.0.lock().unwrap();
+
+                    let should_rebuild = match (*on_disk_mtime, *previously_built_mtime) {
+                        (None, None) => false,
+                        (Some(_), None) | (None, Some(_)) => true,
+                        (Some(on_disk), Some(previously_built)) => on_disk > previously_built,
+                    };
+
+                    if should_rebuild {
+                        failure::bail!(
+                            "mtime based components have changed: previously {:?} now {:?}, \
+                             paths are {:?} and {:?}",
+                            *previously_built_mtime,
+                            *on_disk_mtime,
+                            ap,
+                            bp
+                        )
+                    }
+                }
+                (
+                    &LocalFingerprint::EnvBased(ref akey, ref avalue),
+                    &LocalFingerprint::EnvBased(ref bkey, ref bvalue),
+                ) => {
+                    if *akey != *bkey {
+                        failure::bail!("env vars changed: {} != {}", akey, bkey);
+                    }
+                    if *avalue != *bvalue {
+                        failure::bail!(
+                            "env var `{}` changed: previously {:?} now {:?}",
+                            akey,
+                            bvalue,
+                            avalue
+                        )
+                    }
+                }
+                _ => failure::bail!("local fingerprint type has changed"),
+            }
+        }
+
+        if self.deps.len() != old.deps.len() {
+            failure::bail!("number of dependencies has changed")
+        }
+        for (a, b) in self.deps.iter().zip(old.deps.iter()) {
+            if a.name != b.name || a.fingerprint.hash() != b.fingerprint.hash() {
+                failure::bail!("new ({}) != old ({})", a.pkg_id, b.pkg_id)
+            }
+        }
+        Ok(())
+    }
+}
+
+impl hash::Hash for Fingerprint {
+    fn hash<H: Hasher>(&self, h: &mut H) {
+        let Fingerprint {
+            rustc,
+            ref features,
+            target,
+            path,
+            profile,
+            ref deps,
+            ref local,
+            edition,
+            ref rustflags,
+            ..
+        } = *self;
+        (
+            rustc, features, target, path, profile, local, edition, rustflags,
+        )
+            .hash(h);
+
+        h.write_usize(deps.len());
+        for DepFingerprint {
+            pkg_id,
+            name,
+            fingerprint,
+        } in deps
+        {
+            pkg_id.hash(h);
+            name.hash(h);
+            // Use memoized dep hashes to avoid exponential blowup.
+            h.write_u64(Fingerprint::hash(fingerprint));
+        }
+    }
+}
+
+impl hash::Hash for MtimeSlot {
+    fn hash<H: Hasher>(&self, h: &mut H) {
+        self.0.lock().unwrap().hash(h)
+    }
+}
+
+impl ser::Serialize for MtimeSlot {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        self.0
+            .lock()
+            .unwrap()
+            .map(|ft| (ft.unix_seconds(), ft.nanoseconds()))
+            .serialize(s)
+    }
+}
+
+impl<'de> de::Deserialize<'de> for MtimeSlot {
+    fn deserialize<D>(d: D) -> Result<MtimeSlot, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        let kind: Option<(i64, u32)> = de::Deserialize::deserialize(d)?;
+        Ok(MtimeSlot(Mutex::new(
+            kind.map(|(s, n)| FileTime::from_unix_time(s, n)),
+        )))
+    }
+}
+
+/// Calculates the fingerprint for a package/target pair.
+///
+/// This fingerprint is used by Cargo to learn when information such as the
+/// following changes:
+///
+/// * A non-path package changes (changes version, changes revision, etc).
+/// * Any dependency changes.
+/// * The compiler changes.
+/// * The set of features a package is built with changes.
+/// * The profile a target is compiled with changes (e.g., opt-level changes).
+///
+/// Information like file modification time is only calculated for path
+/// dependencies and is calculated in `calculate_target_fresh`.
+fn calculate<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<Arc<Fingerprint>> {
+    let bcx = cx.bcx;
+    if let Some(s) = cx.fingerprints.get(unit) {
+        return Ok(Arc::clone(s));
+    }
+
+    // Next, recursively calculate the fingerprint for all of our dependencies.
+    //
+    // Skip the fingerprints of build scripts, as they may not always be
+    // available and the dirtiness propagation for modification is tracked
+    // elsewhere. Also skip fingerprints of binaries because they don't actually
+    // induce a recompile; they're just dependencies in the sense that they need
+    // to be built.
+    let deps = cx.dep_targets(unit);
+    let deps = deps
+        .iter()
+        .filter(|u| !u.target.is_custom_build() && !u.target.is_bin())
+        .map(|dep| {
+            calculate(cx, dep).and_then(|fingerprint| {
+                let name = cx.bcx.extern_crate_name(unit, dep)?;
+                Ok(DepFingerprint {
+                    pkg_id: dep.pkg.package_id().to_string(),
+                    name,
+                    fingerprint,
+                })
+            })
+        })
+        .collect::<CargoResult<Vec<_>>>()?;
+
+    // And finally, calculate what our own local fingerprint is.
+    let local = if use_dep_info(unit) {
+        let dep_info = dep_info_loc(cx, unit);
+        let mtime = dep_info_mtime_if_fresh(unit.pkg, &dep_info)?;
+        LocalFingerprint::mtime(cx.files().target_root(), mtime, &dep_info)
+    } else {
+        let fingerprint = pkg_fingerprint(&cx.bcx, unit.pkg)?;
+        LocalFingerprint::Precalculated(fingerprint)
+    };
+    let mut deps = deps;
+    deps.sort_by(|a, b| a.pkg_id.cmp(&b.pkg_id));
+    let extra_flags = if unit.mode.is_doc() {
+        bcx.rustdocflags_args(unit)?
+    } else {
+        bcx.rustflags_args(unit)?
+    };
+    let profile_hash = util::hash_u64(&(&unit.profile, unit.mode, bcx.extra_args_for(unit)));
+    let fingerprint = Arc::new(Fingerprint {
+        rustc: util::hash_u64(&bcx.rustc.verbose_version),
+        target: util::hash_u64(&unit.target),
+        profile: profile_hash,
+        // Note that .0 is hashed here, not .1 which is the cwd. That doesn't
+        // actually affect the output artifact, so there's no need to hash it.
+        path: util::hash_u64(&super::path_args(&cx.bcx, unit).0),
+        features: format!("{:?}", bcx.resolve.features_sorted(unit.pkg.package_id())),
+        deps,
+        local: vec![local],
+        memoized_hash: Mutex::new(None),
+        edition: unit.target.edition(),
+        rustflags: extra_flags,
+    });
+    cx.fingerprints.insert(*unit, Arc::clone(&fingerprint));
+    Ok(fingerprint)
+}
+
+// We want to use the mtime for files if we're a path source, but if we're a
+// git/registry source, then the mtime of files may fluctuate. They won't
+// change so long as the source itself remains constant (which is the
+// responsibility of the source).
+fn use_dep_info(unit: &Unit<'_>) -> bool {
+    let path = unit.pkg.summary().source_id().is_path();
+    !unit.mode.is_doc() && path
+}
+
+/// Prepares the necessary work for the fingerprint of a build command.
+///
+/// Build commands are located on packages, not on targets. Additionally, we
+/// don't have --dep-info to drive calculation of the fingerprint of a build
+/// command. This brings up an interesting predicament which gives us a few
+/// options to figure out whether a build command is dirty or not:
+///
+/// 1. A build command is dirty if *any* file in a package changes. In theory
+///    all files are candidates for being used by the build command.
+/// 2. A build command is dirty if any file in a *specific directory* changes.
+///    This may lose information as it may require files outside of the specific
+///    directory.
+/// 3. A build command must itself provide a dep-info-like file stating how it
+///    should be considered dirty or not.
+///
+/// The currently implemented solution is option (1), although it is planned to
+/// migrate to option (2) in the near future.
+pub fn prepare_build_cmd<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<Preparation> {
+    let _p = profile::start(format!("fingerprint build cmd: {}", unit.pkg.package_id()));
+    let new = cx.files().fingerprint_dir(unit);
+    let loc = new.join("build");
+
+    debug!("fingerprint at: {}", loc.display());
+
+    let (local, output_path) = build_script_local_fingerprints(cx, unit)?;
+    let mut fingerprint = Fingerprint {
+        local,
+        rustc: util::hash_u64(&cx.bcx.rustc.verbose_version),
+        ..Fingerprint::new()
+    };
+    let mtime_on_use = cx.bcx.config.cli_unstable().mtime_on_use;
+    let compare = compare_old_fingerprint(&loc, &fingerprint, mtime_on_use);
+    log_compare(unit, &compare);
+
+    // When we write out the fingerprint, we may want to actually change the
+    // kind of fingerprint being recorded. When we started, the previous run of
+    // the build script (or the absence of one) may have indicated that we
+    // should use the `Precalculated` variant with the `pkg_fingerprint`. If
+    // the build script then prints `rerun-if-changed`, however, we need to
+    // record what's necessary for that fingerprint.
+    //
+    // Hence, if there were some `rerun-if-changed` directives, forcibly change
+    // the kind of fingerprint by reinterpreting the dependencies output by the
+    // build script.
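+    //
+    // For example (hypothetical): a build script printing
+    // `cargo:rerun-if-changed=src/native/hello.c` moves this unit off the
+    // whole-package `Precalculated` fingerprint and onto an `MtimeBased`
+    // entry that goes stale whenever that file is newer than the recorded
+    // build script output.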
+    let state = Arc::clone(&cx.build_state);
+    let key = (unit.pkg.package_id(), unit.kind);
+    let pkg_root = unit.pkg.root().to_path_buf();
+    let target_root = cx.files().target_root().to_path_buf();
+    let write_fingerprint = Work::new(move |_| {
+        if let Some(output_path) = output_path {
+            let outputs = state.outputs.lock().unwrap();
+            let outputs = &outputs[&key];
+            if !outputs.rerun_if_changed.is_empty() || !outputs.rerun_if_env_changed.is_empty() {
+                let deps = BuildDeps::new(&output_path, Some(outputs));
+                fingerprint.local = local_fingerprints_deps(&deps, &target_root, &pkg_root);
+                fingerprint.update_local(&target_root)?;
+            }
+        }
+        write_fingerprint(&loc, &fingerprint)
+    });
+
+    Ok((
+        if compare.is_ok() { Fresh } else { Dirty },
+        write_fingerprint,
+        Work::noop(),
+    ))
+}
+
+fn build_script_local_fingerprints<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    unit: &Unit<'a>,
+) -> CargoResult<(Vec<LocalFingerprint>, Option<PathBuf>)> {
+    let state = cx.build_state.outputs.lock().unwrap();
+    // First up, if this build script is entirely overridden, then we just
+    // return the hash of what we overrode it with.
+    //
+    // Note that the `None` here means that we don't want to update the local
+    // fingerprint afterwards because this is all just overridden.
+    if let Some(output) = state.get(&(unit.pkg.package_id(), unit.kind)) {
+        debug!("override local fingerprints deps");
+        let s = format!(
+            "overridden build state with hash: {}",
+            util::hash_u64(output)
+        );
+        return Ok((vec![LocalFingerprint::Precalculated(s)], None));
+    }
+
+    // Next up, we look at the previously listed dependencies for the build
+    // script. If there are none, then we're in the "old mode" where we just
+    // assume that we're changed if anything in the package changed. The
+    // `Some` here, though, means that we want to update our local fingerprints
+    // after we're done, as running this build script may have created more
+    // dependencies.
+    let deps = &cx.build_explicit_deps[unit];
+    let output = deps.build_script_output.clone();
+    if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() {
+        debug!("old local fingerprints deps");
+        let s = pkg_fingerprint(&cx.bcx, unit.pkg)?;
+        return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output)));
+    }
+
+    // Ok, so now we're in "new mode" where we can have files listed as
+    // dependencies as well as env vars listed as dependencies. Process them
+    // all here.
+    Ok((
+        local_fingerprints_deps(deps, cx.files().target_root(), unit.pkg.root()),
+        Some(output),
+    ))
+}
+
+fn local_fingerprints_deps(
+    deps: &BuildDeps,
+    target_root: &Path,
+    pkg_root: &Path,
+) -> Vec<LocalFingerprint> {
+    debug!("new local fingerprints deps");
+    let mut local = Vec::new();
+    if !deps.rerun_if_changed.is_empty() {
+        let output = &deps.build_script_output;
+        let deps = deps.rerun_if_changed.iter().map(|p| pkg_root.join(p));
+        let mtime = mtime_if_fresh(output, deps);
+        local.push(LocalFingerprint::mtime(target_root, mtime, output));
+    }
+
+    for var in deps.rerun_if_env_changed.iter() {
+        let val = env::var(var).ok();
+        local.push(LocalFingerprint::EnvBased(var.clone(), val));
+    }
+
+    local
+}
+
+fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> {
+    debug_assert_ne!(fingerprint.rustc, 0);
+    // `Fingerprint::new().rustc == 0`; make sure it doesn't make it to the
+    // file system. This is mostly so outside tools can reliably find out what
+    // rust version this file is for, as we can use the full hash.
+    let hash = fingerprint.hash();
+    debug!("write fingerprint: {}", loc.display());
+    paths::write(loc, util::to_hex(hash).as_bytes())?;
+    paths::write(
+        &loc.with_extension("json"),
+        &serde_json::to_vec(&fingerprint).unwrap(),
+    )?;
+    Ok(())
+}
+
+/// Prepares for work when a package starts to build.
+pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<()> {
+    let new1 = cx.files().fingerprint_dir(unit);
+
+    if fs::metadata(&new1).is_err() {
+        fs::create_dir(&new1)?;
+    }
+
+    Ok(())
+}
+
+pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf {
+    cx.files()
+        .fingerprint_dir(unit)
+        .join(&format!("dep-{}", filename(cx, unit)))
+}
+
+fn compare_old_fingerprint(
+    loc: &Path,
+    new_fingerprint: &Fingerprint,
+    mtime_on_use: bool,
+) -> CargoResult<()> {
+    let old_fingerprint_short = paths::read(loc)?;
+
+    if mtime_on_use {
+        // Update the mtime so other cleaners know we used it.
+        let t = FileTime::from_system_time(SystemTime::now());
+        filetime::set_file_times(loc, t, t)?;
+    }
+
+    let new_hash = new_fingerprint.hash();
+
+    if util::to_hex(new_hash) == old_fingerprint_short {
+        return Ok(());
+    }
+
+    let old_fingerprint_json = paths::read(&loc.with_extension("json"))?;
+    let old_fingerprint = serde_json::from_str(&old_fingerprint_json)
+        .chain_err(|| internal("failed to deserialize json"))?;
+    new_fingerprint.compare(&old_fingerprint)
+}
+
+fn log_compare(unit: &Unit<'_>, compare: &CargoResult<()>) {
+    let ce = match *compare {
+        Ok(..) => return,
+        Err(ref e) => e,
+    };
+    info!("fingerprint error for {}: {}", unit.pkg, ce);
+
+    for cause in ce.iter_causes() {
+        info!("  cause: {}", cause);
+    }
+}
+
+// Parses the dep-info into a list of paths.
+pub fn parse_dep_info(pkg: &Package, dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
+    let data = match paths::read_bytes(dep_info) {
+        Ok(data) => data,
+        Err(_) => return Ok(None),
+    };
+    let paths = data
+        .split(|&x| x == 0)
+        .filter(|x| !x.is_empty())
+        .map(|p| util::bytes2path(p).map(|p| pkg.root().join(p)))
+        .collect::<Result<Vec<_>, _>>()?;
+    if paths.is_empty() {
+        Ok(None)
+    } else {
+        Ok(Some(paths))
+    }
+}
+
+fn dep_info_mtime_if_fresh(pkg: &Package, dep_info: &Path) -> CargoResult<Option<FileTime>> {
+    if let Some(paths) = parse_dep_info(pkg, dep_info)? {
+        Ok(mtime_if_fresh(dep_info, paths.iter()))
+    } else {
+        Ok(None)
+    }
+}
+
+fn pkg_fingerprint(bcx: &BuildContext<'_, '_>, pkg: &Package) -> CargoResult<String> {
+    let source_id = pkg.package_id().source_id();
+    let sources = bcx.packages.sources();
+
+    let source = sources
+        .get(source_id)
+        .ok_or_else(|| internal("missing package source"))?;
+    source.fingerprint(pkg)
+}
+
+fn mtime_if_fresh<I>(output: &Path, paths: I) -> Option<FileTime>
+where
+    I: IntoIterator,
+    I::Item: AsRef<Path>,
+{
+    let mtime = match paths::mtime(output) {
+        Ok(mtime) => mtime,
+        Err(..) => return None,
+    };
+
+    let any_stale = paths.into_iter().any(|path| {
+        let path = path.as_ref();
+        let mtime2 = match paths::mtime(path) {
+            Ok(mtime) => mtime,
+            Err(..) => {
+                info!("stale: {} -- missing", path.display());
+                return true;
+            }
+        };
+
+        // TODO: fix #5918.
+        // Note that equal mtimes should be considered "stale". For filesystems
+        // with coarse timestamp precision (e.g., 1s), this would be a
+        // conservative approximation to handle the case where a file is
+        // modified within the same second after a build starts. We want to
+        // make sure that incremental rebuilds pick that up!
+        //
+        // For filesystems with nanosecond precision it's been seen in the wild that
+        // its "nanosecond precision" isn't really nanosecond-accurate. It turns out that
+        // kernels may cache the current time so files created at different times actually
+        // list the same nanosecond precision. Some digging on #5919 picked up that the
+        // kernel caches the current time between timer ticks, which could mean that if
+        // a file is updated at most 10ms after a build starts then Cargo may not
+        // pick up the build changes.
+        //
+        // All in all, an equality check here would be a conservative assumption that,
+        // if equal, files were changed just after a previous build finished.
+        // Unfortunately this became problematic when (in #6484) cargo switched to more
+        // accurately measuring the start time of builds.
+        if mtime2 > mtime {
+            info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime);
+            true
+        } else {
+            false
+        }
+    });
+
+    if any_stale {
+        None
+    } else {
+        Some(mtime)
+    }
+}
+
+fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String {
+    // file_stem includes metadata hash. Thus we have a different
+    // fingerprint for every metadata hash version. This works because
+    // even if the package is fresh, we'll still link the fresh target
+    let file_stem = cx.files().file_stem(unit);
+    let kind = unit.target.kind().description();
+    let flavor = if unit.mode.is_any_test() {
+        "test-"
+    } else if unit.mode.is_doc() {
+        "doc-"
+    } else {
+        ""
+    };
+    format!("{}{}-{}", flavor, kind, file_stem)
+}
+
+/// Parses the dep-info file coming out of rustc into a Cargo-specific format.
+///
+/// This function will parse `rustc_dep_info` as a makefile-style dep info to
+/// learn about all the files which a crate depends on. This is then
+/// re-serialized into the `cargo_dep_info` path in a Cargo-specific format.
+///
+/// The `pkg_root` argument here is the absolute path to the directory
+/// containing `Cargo.toml` for this crate that was compiled. The paths listed
+/// in the rustc dep-info file may or may not be absolute but we'll want to
+/// consider all of them relative to the `root` specified.
+///
+/// The `rustc_cwd` argument is the absolute path to the cwd of the compiler
+/// when it was invoked.
+///
+/// The serialized Cargo format will contain a list of files, all of which are
+/// relative if they're under `root`, or absolute if they're elsewhere.
+pub fn translate_dep_info(
+    rustc_dep_info: &Path,
+    cargo_dep_info: &Path,
+    pkg_root: &Path,
+    rustc_cwd: &Path,
+) -> CargoResult<()> {
+    let target = parse_rustc_dep_info(rustc_dep_info)?;
+    let deps = &target
+        .get(0)
+        .ok_or_else(|| internal("malformed dep-info format, no targets".to_string()))?
+        .1;
+
+    let mut new_contents = Vec::new();
+    for file in deps {
+        let absolute = rustc_cwd.join(file);
+        let path = absolute.strip_prefix(pkg_root).unwrap_or(&absolute);
+        new_contents.extend(util::path2bytes(path)?);
+        new_contents.push(0);
+    }
+    paths::write(cargo_dep_info, &new_contents)?;
+    Ok(())
+}
+
+pub fn parse_rustc_dep_info(rustc_dep_info: &Path) -> CargoResult<Vec<(String, Vec<String>)>> {
+    let contents = paths::read(rustc_dep_info)?;
+    contents
+        .lines()
+        .filter_map(|l| l.find(": ").map(|i| (l, i)))
+        .map(|(line, pos)| {
+            let target = &line[..pos];
+            let mut deps = line[pos + 2..].split_whitespace();
+
+            let mut ret = Vec::new();
+            while let Some(s) = deps.next() {
+                let mut file = s.to_string();
+                while file.ends_with('\\') {
+                    file.pop();
+                    file.push(' ');
+                    file.push_str(deps.next().ok_or_else(|| {
+                        internal("malformed dep-info format, trailing \\".to_string())
+                    })?);
+                }
+                ret.push(file);
+            }
+            Ok((target.to_string(), ret))
+        })
+        .collect()
+}
diff --git a/src/cargo/core/compiler/job.rs b/src/cargo/core/compiler/job.rs
new file mode 100644
index 000000000..ca788b2ff
--- /dev/null
+++ b/src/cargo/core/compiler/job.rs
@@ -0,0 +1,71 @@
+use std::fmt;
+
+use super::job_queue::JobState;
+use crate::util::{CargoResult, Dirty, Fresh, Freshness};
+
+pub struct Job {
+    dirty: Work,
+    fresh: Work,
+}
+
+/// Each proc should send its description before starting.
+/// It should send either once or close immediately.
+pub struct Work {
+    inner: Box<dyn for<'a, 'b> FnBox<&'a JobState<'b>, CargoResult<()>> + Send>,
+}
+
+trait FnBox<A, R> {
+    fn call_box(self: Box<Self>, a: A) -> R;
+}
+
+impl<A, R, F: FnOnce(A) -> R> FnBox<A, R> for F {
+    fn call_box(self: Box<F>, a: A) -> R {
+        (*self)(a)
+    }
+}
+
+impl Work {
+    pub fn new<F>(f: F) -> Work
+    where
+        F: FnOnce(&JobState<'_>) -> CargoResult<()> + Send + 'static,
+    {
+        Work { inner: Box::new(f) }
+    }
+
+    pub fn noop() -> Work {
+        Work::new(|_| Ok(()))
+    }
+
+    pub fn call(self, tx: &JobState<'_>) -> CargoResult<()> {
+        self.inner.call_box(tx)
+    }
+
+    pub fn then(self, next: Work) -> Work {
+        Work::new(move |state| {
+            self.call(state)?;
+            next.call(state)
+        })
+    }
+}
+
+impl Job {
+    /// Creates a new job representing a unit of work.
+    pub fn new(dirty: Work, fresh: Work) -> Job {
+        Job { dirty, fresh }
+    }
+
+    /// Consumes this job by running it, returning the result of the
+    /// computation.
+    pub fn run(self, fresh: Freshness, state: &JobState<'_>) -> CargoResult<()> {
+        match fresh {
+            Fresh => self.fresh.call(state),
+            Dirty => self.dirty.call(state),
+        }
+    }
+}
+
+impl fmt::Debug for Job {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Job {{ ... }}")
}}") + } +} diff --git a/src/cargo/core/compiler/job_queue.rs b/src/cargo/core/compiler/job_queue.rs new file mode 100644 index 000000000..4fde1fbde --- /dev/null +++ b/src/cargo/core/compiler/job_queue.rs @@ -0,0 +1,582 @@ +use std::collections::hash_map::HashMap; +use std::collections::HashSet; +use std::fmt; +use std::io; +use std::mem; +use std::process::Output; +use std::sync::mpsc::{channel, Receiver, Sender}; +use std::sync::Arc; + +use crossbeam_utils::thread::Scope; +use jobserver::{Acquired, HelperThread}; +use log::{debug, info, trace}; + +use crate::core::profiles::Profile; +use crate::core::{PackageId, Target, TargetKind}; +use crate::handle_error; +use crate::util; +use crate::util::diagnostic_server::{self, DiagnosticPrinter}; +use crate::util::{internal, profile, CargoResult, CargoResultExt, ProcessBuilder}; +use crate::util::{Config, DependencyQueue, Dirty, Fresh, Freshness}; +use crate::util::{Progress, ProgressStyle}; +use super::context::OutputFile; +use super::job::Job; +use super::{BuildContext, BuildPlan, CompileMode, Context, Kind, Unit}; + +/// A management structure of the entire dependency graph to compile. +/// +/// This structure is backed by the `DependencyQueue` type and manages the +/// actual compilation step of each package. Packages enqueue units of work and +/// then later on the entire graph is processed and compiled. +pub struct JobQueue<'a, 'cfg> { + queue: DependencyQueue, Vec<(Job, Freshness)>>, + tx: Sender>, + rx: Receiver>, + active: Vec>, + pending: HashMap, PendingBuild>, + compiled: HashSet, + documented: HashSet, + counts: HashMap, + is_release: bool, + progress: Progress<'cfg>, +} + +/// A helper structure for metadata about the state of a building package. +struct PendingBuild { + /// The number of jobs currently active. + amt: usize, + /// The current freshness state of this package. Any dirty target within a + /// package will cause the entire package to become dirty. + fresh: Freshness, +} + +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +struct Key<'a> { + pkg: PackageId, + target: &'a Target, + profile: Profile, + kind: Kind, + mode: CompileMode, +} + +impl<'a> Key<'a> { + fn name_for_progress(&self) -> String { + let pkg_name = self.pkg.name(); + match self.mode { + CompileMode::Doc { .. 
} => format!("{}(doc)", pkg_name), + CompileMode::RunCustomBuild => format!("{}(build)", pkg_name), + _ => { + let annotation = match self.target.kind() { + TargetKind::Lib(_) => return pkg_name.to_string(), + TargetKind::CustomBuild => return format!("{}(build.rs)", pkg_name), + TargetKind::Bin => "bin", + TargetKind::Test => "test", + TargetKind::Bench => "bench", + TargetKind::ExampleBin | TargetKind::ExampleLib(_) => "example", + }; + format!("{}({})", self.target.name(), annotation) + } + } + } +} + +pub struct JobState<'a> { + tx: Sender>, +} + +enum Message<'a> { + Run(String), + BuildPlanMsg(String, ProcessBuilder, Arc>), + Stdout(String), + Stderr(String), + FixDiagnostic(diagnostic_server::Message), + Token(io::Result), + Finish(Key<'a>, CargoResult<()>), +} + +impl<'a> JobState<'a> { + pub fn running(&self, cmd: &ProcessBuilder) { + let _ = self.tx.send(Message::Run(cmd.to_string())); + } + + pub fn build_plan( + &self, + module_name: String, + cmd: ProcessBuilder, + filenames: Arc>, + ) { + let _ = self + .tx + .send(Message::BuildPlanMsg(module_name, cmd, filenames)); + } + + pub fn capture_output( + &self, + cmd: &ProcessBuilder, + prefix: Option, + capture_output: bool, + ) -> CargoResult { + let prefix = prefix.unwrap_or_else(String::new); + cmd.exec_with_streaming( + &mut |out| { + let _ = self.tx.send(Message::Stdout(format!("{}{}", prefix, out))); + Ok(()) + }, + &mut |err| { + let _ = self.tx.send(Message::Stderr(format!("{}{}", prefix, err))); + Ok(()) + }, + capture_output, + ) + } +} + +impl<'a, 'cfg> JobQueue<'a, 'cfg> { + pub fn new(bcx: &BuildContext<'a, 'cfg>) -> JobQueue<'a, 'cfg> { + let (tx, rx) = channel(); + let progress = Progress::with_style("Building", ProgressStyle::Ratio, bcx.config); + JobQueue { + queue: DependencyQueue::new(), + tx, + rx, + active: Vec::new(), + pending: HashMap::new(), + compiled: HashSet::new(), + documented: HashSet::new(), + counts: HashMap::new(), + is_release: bcx.build_config.release, + progress, + } + } + + pub fn enqueue( + &mut self, + cx: &Context<'a, 'cfg>, + unit: &Unit<'a>, + job: Job, + fresh: Freshness, + ) -> CargoResult<()> { + let key = Key::new(unit); + let deps = key.dependencies(cx)?; + self.queue + .queue(Fresh, &key, Vec::new(), &deps) + .push((job, fresh)); + *self.counts.entry(key.pkg).or_insert(0) += 1; + Ok(()) + } + + /// Executes all jobs necessary to build the dependency graph. + /// + /// This function will spawn off `config.jobs()` workers to build all of the + /// necessary dependencies, in order. Freshness is propagated as far as + /// possible along each dependency chain. + pub fn execute(&mut self, cx: &mut Context<'_, '_>, plan: &mut BuildPlan) -> CargoResult<()> { + let _p = profile::start("executing the job graph"); + self.queue.queue_finished(); + + // We need to give a handle to the send half of our message queue to the + // jobserver and (optionally) diagnostic helper thread. Unfortunately + // though we need the handle to be `'static` as that's typically what's + // required when spawning a thread! + // + // To work around this we transmute the `Sender` to a static lifetime. + // we're only sending "longer living" messages and we should also + // destroy all references to the channel before this function exits as + // the destructor for the `helper` object will ensure the associated + // thread is no longer running. + // + // As a result, this `transmute` to a longer lifetime should be safe in + // practice. 
+        let tx = self.tx.clone();
+        let tx = unsafe { mem::transmute::<Sender<Message<'a>>, Sender<Message<'static>>>(tx) };
+        let tx2 = tx.clone();
+        let helper = cx
+            .jobserver
+            .clone()
+            .into_helper_thread(move |token| {
+                drop(tx.send(Message::Token(token)));
+            })
+            .chain_err(|| "failed to create helper thread for jobserver management")?;
+        let _diagnostic_server = cx
+            .bcx
+            .build_config
+            .rustfix_diagnostic_server
+            .borrow_mut()
+            .take()
+            .map(move |srv| srv.start(move |msg| drop(tx2.send(Message::FixDiagnostic(msg)))));
+
+        crossbeam_utils::thread::scope(|scope| self.drain_the_queue(cx, plan, scope, &helper))
+            .expect("child threads shouldn't panic")
+    }
+
+    fn drain_the_queue(
+        &mut self,
+        cx: &mut Context<'_, '_>,
+        plan: &mut BuildPlan,
+        scope: &Scope<'a>,
+        jobserver_helper: &HelperThread,
+    ) -> CargoResult<()> {
+        let mut tokens = Vec::new();
+        let mut queue = Vec::new();
+        let build_plan = cx.bcx.build_config.build_plan;
+        let mut print = DiagnosticPrinter::new(cx.bcx.config);
+        trace!("queue: {:#?}", self.queue);
+
+        // Iteratively execute the entire dependency graph. Each turn of the
+        // loop starts out by scheduling as much work as possible (up to the
+        // maximum number of parallel jobs we have tokens for). A local queue
+        // is maintained separately from the main dependency queue as one
+        // dequeue may actually dequeue quite a bit of work (e.g., 10 binaries
+        // in one package).
+        //
+        // After a job has finished we update our internal state if it was
+        // successful and otherwise wait for pending work to finish if it failed
+        // and then immediately return.
+        let mut error = None;
+        let total = self.queue.len();
+        loop {
+            // Dequeue as much work as we can, learning about everything
+            // possible that can run. Note that this is also the point where we
+            // start requesting job tokens. Each job after the first needs to
+            // request a token.
+            while let Some((fresh, key, jobs)) = self.queue.dequeue() {
+                let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| f.combine(fresh));
+                self.pending.insert(
+                    key,
+                    PendingBuild {
+                        amt: jobs.len(),
+                        fresh: total_fresh,
+                    },
+                );
+                for (job, f) in jobs {
+                    queue.push((key, job, f.combine(fresh)));
+                    if !self.active.is_empty() || !queue.is_empty() {
+                        jobserver_helper.request_token();
+                    }
+                }
+            }
+
+            // Now that we've learned of all possible work that we can execute
+            // try to spawn it so long as we've got a jobserver token which says
+            // we're able to perform some parallel work.
+            while error.is_none() && self.active.len() < tokens.len() + 1 && !queue.is_empty() {
+                let (key, job, fresh) = queue.remove(0);
+                self.run(key, fresh, job, cx.bcx.config, scope, build_plan)?;
+            }
+
+            // If after all that we're not actually running anything then we're
+            // done!
+            if self.active.is_empty() {
+                break;
+            }
+
+            // And finally, before we block waiting for the next event, drop any
+            // excess tokens we may have accidentally acquired. Due to how our
+            // jobserver interface is architected we may acquire a token that we
+            // don't actually use, and if this happens just relinquish it back
+            // to the jobserver itself.
+            tokens.truncate(self.active.len() - 1);
+
+            // Drain all events at once to avoid displaying the progress bar
+            // unnecessarily.
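+            //
+            // Roughly: `try_iter` drains whatever is already queued without
+            // blocking; only if that yields nothing do we tick the progress
+            // bar and then block on `recv` for the next event.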
+ let events: Vec<_> = self.rx.try_iter().collect(); + let events = if events.is_empty() { + self.show_progress(total); + vec![self.rx.recv().unwrap()] + } else { + events + }; + + for event in events { + match event { + Message::Run(cmd) => { + cx.bcx + .config + .shell() + .verbose(|c| c.status("Running", &cmd))?; + } + Message::BuildPlanMsg(module_name, cmd, filenames) => { + plan.update(&module_name, &cmd, &filenames)?; + } + Message::Stdout(out) => { + self.progress.clear(); + println!("{}", out); + } + Message::Stderr(err) => { + let mut shell = cx.bcx.config.shell(); + shell.print_ansi(err.as_bytes())?; + shell.err().write_all(b"\n")?; + } + Message::FixDiagnostic(msg) => { + print.print(&msg)?; + } + Message::Finish(key, result) => { + info!("end: {:?}", key); + + // FIXME: switch to this when stabilized. + // self.active.remove_item(&key); + let pos = self + .active + .iter() + .position(|k| *k == key) + .expect("an unrecorded package has finished compiling"); + self.active.remove(pos); + if !self.active.is_empty() { + assert!(!tokens.is_empty()); + drop(tokens.pop()); + } + match result { + Ok(()) => self.finish(key, cx)?, + Err(e) => { + let msg = "The following warnings were emitted during compilation:"; + self.emit_warnings(Some(msg), &key, cx)?; + + if !self.active.is_empty() { + error = Some(failure::format_err!("build failed")); + handle_error(&e, &mut *cx.bcx.config.shell()); + cx.bcx.config.shell().warn( + "build failed, waiting for other \ + jobs to finish...", + )?; + } else { + error = Some(e); + } + } + } + } + Message::Token(acquired_token) => { + tokens.push( + acquired_token.chain_err(|| "failed to acquire jobserver token")?, + ); + } + } + } + } + self.progress.clear(); + + let build_type = if self.is_release { "release" } else { "dev" }; + // NOTE: this may be a bit inaccurate, since this may not display the + // profile for what was actually built. Profile overrides can change + // these settings, and in some cases different targets are built with + // different profiles. To be accurate, it would need to collect a + // list of Units built, and maybe display a list of the different + // profiles used. However, to keep it simple and compatible with old + // behavior, we just display what the base profile is. + let profile = cx.bcx.profiles.base_profile(self.is_release); + let mut opt_type = String::from(if profile.opt_level.as_str() == "0" { + "unoptimized" + } else { + "optimized" + }); + if profile.debuginfo.is_some() { + opt_type += " + debuginfo"; + } + + let time_elapsed = util::elapsed(cx.bcx.config.creation_time().elapsed()); + + if self.queue.is_empty() { + let message = format!( + "{} [{}] target(s) in {}", + build_type, opt_type, time_elapsed + ); + if !build_plan { + cx.bcx.config.shell().status("Finished", message)?; + } + Ok(()) + } else if let Some(e) = error { + Err(e) + } else { + debug!("queue: {:#?}", self.queue); + Err(internal("finished with jobs still left in the queue")) + } + } + + fn show_progress(&mut self, total: usize) { + let count = total - self.queue.len(); + let active_names = self + .active + .iter() + .map(Key::name_for_progress) + .collect::>(); + drop( + self.progress + .tick_now(count, total, &format!(": {}", active_names.join(", "))), + ); + } + + /// Executes a job in the `scope` given, pushing the spawned thread's + /// handled onto `threads`. 
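+    ///
+    /// Fresh jobs run inline since they are cheap; dirty jobs are spawned
+    /// onto a scoped thread. In sketch form (details elided):
+    ///
+    /// ```ignore
+    /// match fresh {
+    ///     Freshness::Fresh => doit(),
+    ///     Freshness::Dirty => { scope.spawn(move |_| doit()); }
+    /// }
+    /// ```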
+ fn run( + &mut self, + key: Key<'a>, + fresh: Freshness, + job: Job, + config: &Config, + scope: &Scope<'a>, + build_plan: bool, + ) -> CargoResult<()> { + info!("start: {:?}", key); + + self.active.push(key); + *self.counts.get_mut(&key.pkg).unwrap() -= 1; + + let my_tx = self.tx.clone(); + let doit = move || { + let res = job.run(fresh, &JobState { tx: my_tx.clone() }); + my_tx.send(Message::Finish(key, res)).unwrap(); + }; + + if !build_plan { + // Print out some nice progress information. + self.note_working_on(config, &key, fresh)?; + } + + match fresh { + Freshness::Fresh => doit(), + Freshness::Dirty => { + scope.spawn(move |_| doit()); + } + } + + Ok(()) + } + + fn emit_warnings( + &mut self, + msg: Option<&str>, + key: &Key<'a>, + cx: &mut Context<'_, '_>, + ) -> CargoResult<()> { + let output = cx.build_state.outputs.lock().unwrap(); + let bcx = &mut cx.bcx; + if let Some(output) = output.get(&(key.pkg, key.kind)) { + if !output.warnings.is_empty() { + if let Some(msg) = msg { + writeln!(bcx.config.shell().err(), "{}\n", msg)?; + } + + for warning in output.warnings.iter() { + bcx.config.shell().warn(warning)?; + } + + if msg.is_some() { + // Output an empty line. + writeln!(bcx.config.shell().err())?; + } + } + } + + Ok(()) + } + + fn finish(&mut self, key: Key<'a>, cx: &mut Context<'_, '_>) -> CargoResult<()> { + if key.mode.is_run_custom_build() && cx.bcx.show_warnings(key.pkg) { + self.emit_warnings(None, &key, cx)?; + } + + let state = self.pending.get_mut(&key).unwrap(); + state.amt -= 1; + if state.amt == 0 { + self.queue.finish(&key, state.fresh); + } + Ok(()) + } + + // This isn't super trivial because we don't want to print loads and + // loads of information to the console, but we also want to produce a + // faithful representation of what's happening. This is somewhat nuanced + // as a package can start compiling *very* early on because of custom + // build commands and such. + // + // In general, we try to print "Compiling" for the first nontrivial task + // run for a package, regardless of when that is. We then don't print + // out any more information for a package after we've printed it once. + fn note_working_on( + &mut self, + config: &Config, + key: &Key<'a>, + fresh: Freshness, + ) -> CargoResult<()> { + if (self.compiled.contains(&key.pkg) && !key.mode.is_doc()) + || (self.documented.contains(&key.pkg) && key.mode.is_doc()) + { + return Ok(()); + } + + match fresh { + // Any dirty stage which runs at least one command gets printed as + // being a compiled package. + Dirty => { + if key.mode.is_doc() { + // Skip doc test. + if !key.mode.is_any_test() { + self.documented.insert(key.pkg); + config.shell().status("Documenting", key.pkg)?; + } + } else { + self.compiled.insert(key.pkg); + if key.mode.is_check() { + config.shell().status("Checking", key.pkg)?; + } else { + config.shell().status("Compiling", key.pkg)?; + } + } + } + Fresh => { + // If doc test are last, only print "Fresh" if nothing has been printed. 
+ if self.counts[&key.pkg] == 0 + && !(key.mode == CompileMode::Doctest && self.compiled.contains(&key.pkg)) + { + self.compiled.insert(key.pkg); + config.shell().verbose(|c| c.status("Fresh", key.pkg))?; + } + } + } + Ok(()) + } +} + +impl<'a> Key<'a> { + fn new(unit: &Unit<'a>) -> Key<'a> { + Key { + pkg: unit.pkg.package_id(), + target: unit.target, + profile: unit.profile, + kind: unit.kind, + mode: unit.mode, + } + } + + fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) -> CargoResult>> { + let unit = Unit { + pkg: cx.get_package(self.pkg)?, + target: self.target, + profile: self.profile, + kind: self.kind, + mode: self.mode, + }; + let targets = cx.dep_targets(&unit); + Ok(targets + .iter() + .filter_map(|unit| { + // Binaries aren't actually needed to *compile* tests, just to run + // them, so we don't include this dependency edge in the job graph. + if self.target.is_test() && unit.target.is_bin() { + None + } else { + Some(Key::new(unit)) + } + }) + .collect()) + } +} + +impl<'a> fmt::Debug for Key<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{} => {}/{} => {:?}", + self.pkg, self.target, self.profile, self.kind + ) + } +} diff --git a/src/cargo/core/compiler/layout.rs b/src/cargo/core/compiler/layout.rs new file mode 100644 index 000000000..9ea14800a --- /dev/null +++ b/src/cargo/core/compiler/layout.rs @@ -0,0 +1,212 @@ +//! Management of the directory layout of a build +//! +//! The directory layout is a little tricky at times, hence a separate file to +//! house this logic. The current layout looks like this: +//! +//! ```ignore +//! # This is the root directory for all output, the top-level package +//! # places all of its output here. +//! target/ +//! +//! # This is the root directory for all output of *dependencies* +//! deps/ +//! +//! # Root directory for all compiled examples +//! examples/ +//! +//! # This is the location at which the output of all custom build +//! # commands are rooted +//! build/ +//! +//! # Each package gets its own directory where its build script and +//! # script output are placed +//! $pkg1/ +//! $pkg2/ +//! $pkg3/ +//! +//! # Each directory package has a `out` directory where output +//! # is placed. +//! out/ +//! +//! # This is the location at which the output of all old custom build +//! # commands are rooted +//! native/ +//! +//! # Each package gets its own directory for where its output is +//! # placed. We can't track exactly what's getting put in here, so +//! # we just assume that all relevant output is in these +//! # directories. +//! $pkg1/ +//! $pkg2/ +//! $pkg3/ +//! +//! # Directory used to store incremental data for the compiler (when +//! # incremental is enabled. +//! incremental/ +//! +//! # Hidden directory that holds all of the fingerprint files for all +//! # packages +//! .fingerprint/ +//! ``` + +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +use crate::core::Workspace; +use crate::util::{CargoResult, Config, FileLock, Filesystem}; + +/// Contains the paths of all target output locations. +/// +/// See module docs for more information. +pub struct Layout { + root: PathBuf, + deps: PathBuf, + native: PathBuf, + build: PathBuf, + incremental: PathBuf, + fingerprint: PathBuf, + examples: PathBuf, + /// The lock file for a build, will be unlocked when this struct is `drop`ped. 
+ _lock: FileLock, +} + +pub fn is_bad_artifact_name(name: &str) -> bool { + ["deps", "examples", "build", "native", "incremental"] + .iter() + .any(|&reserved| reserved == name) +} + +impl Layout { + /// Calculate the paths for build output, lock the build directory, and return as a Layout. + /// + /// This function will block if the directory is already locked. + /// + /// Differs from `at` in that this calculates the root path from the workspace target directory, + /// adding the target triple and the profile (debug, release, ...). + pub fn new(ws: &Workspace<'_>, triple: Option<&str>, dest: &str) -> CargoResult { + let mut path = ws.target_dir(); + // Flexible target specifications often point at json files, so interpret + // the target triple as a Path and then just use the file stem as the + // component for the directory name in that case. + if let Some(triple) = triple { + let triple = Path::new(triple); + if triple.extension().and_then(|s| s.to_str()) == Some("json") { + path.push( + triple + .file_stem() + .ok_or_else(|| failure::format_err!("invalid target"))?, + ); + } else { + path.push(triple); + } + } + path.push(dest); + Layout::at(ws.config(), path) + } + + /// Calculate the paths for build output, lock the build directory, and return as a Layout. + /// + /// This function will block if the directory is already locked. + pub fn at(config: &Config, root: Filesystem) -> CargoResult { + // For now we don't do any more finer-grained locking on the artifact + // directory, so just lock the entire thing for the duration of this + // compile. + let lock = root.open_rw(".cargo-lock", config, "build directory")?; + let root = root.into_path_unlocked(); + + Ok(Layout { + deps: root.join("deps"), + native: root.join("native"), + build: root.join("build"), + incremental: root.join("incremental"), + fingerprint: root.join(".fingerprint"), + examples: root.join("examples"), + root, + _lock: lock, + }) + } + + #[cfg(not(target_os = "macos"))] + fn exclude_from_backups(&self, _: &Path) {} + + #[cfg(target_os = "macos")] + /// Marks files or directories as excluded from Time Machine on macOS + /// + /// This is recommended to prevent derived/temporary files from bloating backups. + fn exclude_from_backups(&self, path: &Path) { + use core_foundation::base::TCFType; + use core_foundation::{number, string, url}; + use std::ptr; + + // For compatibility with 10.7 a string is used instead of global kCFURLIsExcludedFromBackupKey + let is_excluded_key: Result = "NSURLIsExcludedFromBackupKey".parse(); + let path = url::CFURL::from_path(path, false); + if let (Some(path), Ok(is_excluded_key)) = (path, is_excluded_key) { + unsafe { + url::CFURLSetResourcePropertyForKey( + path.as_concrete_TypeRef(), + is_excluded_key.as_concrete_TypeRef(), + number::kCFBooleanTrue as *const _, + ptr::null_mut(), + ); + } + } + // Errors are ignored, since it's an optional feature and failure + // doesn't prevent Cargo from working + } + + /// Makes sure all directories stored in the Layout exist on the filesystem. + pub fn prepare(&mut self) -> io::Result<()> { + if fs::metadata(&self.root).is_err() { + fs::create_dir_all(&self.root)?; + } + + self.exclude_from_backups(&self.root); + + mkdir(&self.deps)?; + mkdir(&self.native)?; + mkdir(&self.incremental)?; + mkdir(&self.fingerprint)?; + mkdir(&self.examples)?; + mkdir(&self.build)?; + + return Ok(()); + + fn mkdir(dir: &Path) -> io::Result<()> { + if fs::metadata(&dir).is_err() { + fs::create_dir(dir)?; + } + Ok(()) + } + } + + /// Fetch the root path. 
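+    /// (Currently the same directory as `root()`; the two names reflect the
+    /// two ways call sites think about it.)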
+ pub fn dest(&self) -> &Path { + &self.root + } + /// Fetch the deps path. + pub fn deps(&self) -> &Path { + &self.deps + } + /// Fetch the examples path. + pub fn examples(&self) -> &Path { + &self.examples + } + /// Fetch the root path. + pub fn root(&self) -> &Path { + &self.root + } + /// Fetch the incremental path. + pub fn incremental(&self) -> &Path { + &self.incremental + } + /// Fetch the fingerprint path. + pub fn fingerprint(&self) -> &Path { + &self.fingerprint + } + /// Fetch the build path. + pub fn build(&self) -> &Path { + &self.build + } +} diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs new file mode 100644 index 000000000..0ab2ac8e7 --- /dev/null +++ b/src/cargo/core/compiler/mod.rs @@ -0,0 +1,1043 @@ +mod build_config; +mod build_context; +mod build_plan; +mod compilation; +mod context; +mod custom_build; +mod fingerprint; +mod job; +mod job_queue; +mod layout; +mod output_depinfo; + +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fs; +use std::io::{self, Write}; +use std::path::{self, Path, PathBuf}; +use std::sync::Arc; + +use failure::Error; +use log::debug; +use same_file::is_same_file; +use serde::Serialize; + +use crate::core::manifest::TargetSourcePath; +use crate::core::profiles::{Lto, Profile}; +use crate::core::{PackageId, Target}; +use crate::util::errors::{CargoResult, CargoResultExt, Internal, ProcessError}; +use crate::util::paths; +use crate::util::{self, machine_message, process, Freshness, ProcessBuilder}; +use crate::util::{internal, join_paths, profile}; +pub use self::build_config::{BuildConfig, CompileMode, MessageFormat}; +pub use self::build_context::{BuildContext, FileFlavor, TargetConfig, TargetInfo}; +use self::build_plan::BuildPlan; +pub use self::compilation::{Compilation, Doctest}; +pub use self::context::{Context, Unit}; +pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts}; +use self::job::{Job, Work}; +use self::job_queue::JobQueue; +pub use self::layout::is_bad_artifact_name; +use self::output_depinfo::output_depinfo; + +/// Indicates whether an object is for the host architcture or the target architecture. +/// +/// These will be the same unless cross-compiling. +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord, Serialize)] +pub enum Kind { + Host, + Target, +} + +/// A glorified callback for executing calls to rustc. Rather than calling rustc +/// directly, we'll use an `Executor`, giving clients an opportunity to intercept +/// the build calls. +pub trait Executor: Send + Sync + 'static { + /// Called after a rustc process invocation is prepared up-front for a given + /// unit of work (may still be modified for runtime-known dependencies, when + /// the work is actually executed). + fn init(&self, _cx: &Context<'_, '_>, _unit: &Unit<'_>) {} + + /// In case of an `Err`, Cargo will not continue with the build process for + /// this package. + fn exec( + &self, + cmd: ProcessBuilder, + _id: PackageId, + _target: &Target, + _mode: CompileMode, + ) -> CargoResult<()> { + cmd.exec()?; + Ok(()) + } + + fn exec_and_capture_output( + &self, + cmd: ProcessBuilder, + id: PackageId, + target: &Target, + mode: CompileMode, + _state: &job_queue::JobState<'_>, + ) -> CargoResult<()> { + // We forward to `exec()` to keep RLS working. 
+ self.exec(cmd, id, target, mode) + } + + fn exec_json( + &self, + cmd: ProcessBuilder, + _id: PackageId, + _target: &Target, + _mode: CompileMode, + handle_stdout: &mut dyn FnMut(&str) -> CargoResult<()>, + handle_stderr: &mut dyn FnMut(&str) -> CargoResult<()>, + ) -> CargoResult<()> { + cmd.exec_with_streaming(handle_stdout, handle_stderr, false)?; + Ok(()) + } + + /// Queried when queuing each unit of work. If it returns true, then the + /// unit will always be rebuilt, independent of whether it needs to be. + fn force_rebuild(&self, _unit: &Unit<'_>) -> bool { + false + } +} + +/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's +/// default behaviour. +#[derive(Copy, Clone)] +pub struct DefaultExecutor; + +impl Executor for DefaultExecutor { + fn exec_and_capture_output( + &self, + cmd: ProcessBuilder, + _id: PackageId, + _target: &Target, + _mode: CompileMode, + state: &job_queue::JobState<'_>, + ) -> CargoResult<()> { + state.capture_output(&cmd, None, false).map(drop) + } +} + +fn compile<'a, 'cfg: 'a>( + cx: &mut Context<'a, 'cfg>, + jobs: &mut JobQueue<'a, 'cfg>, + plan: &mut BuildPlan, + unit: &Unit<'a>, + exec: &Arc, + force_rebuild: bool, +) -> CargoResult<()> { + let bcx = cx.bcx; + let build_plan = bcx.build_config.build_plan; + if !cx.compiled.insert(*unit) { + return Ok(()); + } + + // Build up the work to be done to compile this unit, enqueuing it once + // we've got everything constructed. + let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name())); + fingerprint::prepare_init(cx, unit)?; + cx.links.validate(bcx.resolve, unit)?; + + let (dirty, fresh, freshness) = if unit.mode.is_run_custom_build() { + custom_build::prepare(cx, unit)? + } else if unit.mode == CompileMode::Doctest { + // We run these targets later, so this is just a no-op for now. + (Work::noop(), Work::noop(), Freshness::Fresh) + } else if build_plan { + ( + rustc(cx, unit, &exec.clone())?, + Work::noop(), + Freshness::Dirty, + ) + } else { + let (mut freshness, dirty, fresh) = fingerprint::prepare_target(cx, unit)?; + let work = if unit.mode.is_doc() { + rustdoc(cx, unit)? + } else { + rustc(cx, unit, exec)? + }; + // Need to link targets on both the dirty and fresh. + let dirty = work.then(link_targets(cx, unit, false)?).then(dirty); + let fresh = link_targets(cx, unit, true)?.then(fresh); + + if exec.force_rebuild(unit) || force_rebuild { + freshness = Freshness::Dirty; + } + + (dirty, fresh, freshness) + }; + jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?; + drop(p); + + // Be sure to compile all dependencies of this target as well. + for unit in cx.dep_targets(unit).iter() { + compile(cx, jobs, plan, unit, exec, false)?; + } + if build_plan { + plan.add(cx, unit)?; + } + + Ok(()) +} + +fn rustc<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, + exec: &Arc, +) -> CargoResult { + let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?; + if cx.is_primary_package(unit) { + rustc.env("CARGO_PRIMARY_PACKAGE", "1"); + } + let build_plan = cx.bcx.build_config.build_plan; + + let name = unit.pkg.name().to_string(); + let buildkey = unit.buildkey(); + + add_cap_lints(cx.bcx, unit, &mut rustc); + + let outputs = cx.outputs(unit)?; + let root = cx.files().out_dir(unit); + let kind = unit.kind; + + // Prepare the native lib state (extra `-L` and `-l` flags). 
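+    // For example, a build script that printed (paths hypothetical)
+    //
+    //     cargo:rustc-link-search=native=/opt/foo/lib
+    //     cargo:rustc-link-lib=foo
+    //
+    // results in `-L native=/opt/foo/lib` and `-l foo` being appended to the
+    // rustc invocation assembled below.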
+ let build_state = cx.build_state.clone(); + let current_id = unit.pkg.package_id(); + let build_deps = load_build_deps(cx, unit); + + // If we are a binary and the package also contains a library, then we + // don't pass the `-l` flags. + let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib()); + let do_rename = unit.target.allows_underscores() && !unit.mode.is_any_test(); + let real_name = unit.target.name().to_string(); + let crate_name = unit.target.crate_name(); + + // Rely on `target_filenames` iterator as source of truth rather than rederiving filestem. + let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() { + root.join(&crate_name) + } else { + root.join(&cx.files().file_stem(unit)) + } + .with_extension("d"); + let dep_info_loc = fingerprint::dep_info_loc(cx, unit); + + rustc.args(&cx.bcx.rustflags_args(unit)?); + let json_messages = cx.bcx.build_config.json_messages(); + let package_id = unit.pkg.package_id(); + let target = unit.target.clone(); + let mode = unit.mode; + + exec.init(cx, unit); + let exec = exec.clone(); + + let root_output = cx.files().target_root().to_path_buf(); + let pkg_root = unit.pkg.root().to_path_buf(); + let cwd = rustc + .get_cwd() + .unwrap_or_else(|| cx.bcx.config.cwd()) + .to_path_buf(); + + return Ok(Work::new(move |state| { + // Only at runtime have we discovered what the extra -L and -l + // arguments are for native libraries, so we process those here. We + // also need to be sure to add any -L paths for our plugins to the + // dynamic library load path as a plugin's dynamic library may be + // located somewhere in there. + // Finally, if custom environment variables have been produced by + // previous build scripts, we include them in the rustc invocation. + if let Some(build_deps) = build_deps { + let build_state = build_state.outputs.lock().unwrap(); + if !build_plan { + add_native_deps( + &mut rustc, + &build_state, + &build_deps, + pass_l_flag, + current_id, + )?; + add_plugin_deps(&mut rustc, &build_state, &build_deps, &root_output)?; + } + add_custom_env(&mut rustc, &build_state, current_id, kind)?; + } + + for output in outputs.iter() { + // If there is both an rmeta and rlib, rustc will prefer to use the + // rlib, even if it is older. Therefore, we must delete the rlib to + // force using the new rmeta. + if output.path.extension() == Some(OsStr::new("rmeta")) { + let dst = root.join(&output.path).with_extension("rlib"); + if dst.exists() { + paths::remove_file(&dst)?; + } + } + } + + fn internal_if_simple_exit_code(err: Error) -> Error { + // If a signal on unix (`code == None`) or an abnormal termination + // on Windows (codes like `0xC0000409`), don't hide the error details. 
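+        // For example, rustc exiting with its usual error code 1 is wrapped
+        // as an internal error (rustc already printed its diagnostics),
+        // whereas a segfault (no exit code on unix) is surfaced verbatim.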
+ match err + .downcast_ref::() + .as_ref() + .and_then(|perr| perr.exit.and_then(|e| e.code())) + { + Some(n) if n < 128 => Internal::new(err).into(), + _ => err, + } + } + + state.running(&rustc); + let timestamp = paths::get_current_filesystem_time(&dep_info_loc)?; + if json_messages { + exec.exec_json( + rustc, + package_id, + &target, + mode, + &mut assert_is_empty, + &mut |line| json_stderr(line, package_id, &target), + ) + .map_err(internal_if_simple_exit_code) + .chain_err(|| format!("Could not compile `{}`.", name))?; + } else if build_plan { + state.build_plan(buildkey, rustc.clone(), outputs.clone()); + } else { + exec.exec_and_capture_output(rustc, package_id, &target, mode, state) + .map_err(internal_if_simple_exit_code) + .chain_err(|| format!("Could not compile `{}`.", name))?; + } + + if do_rename && real_name != crate_name { + let dst = &outputs[0].path; + let src = dst.with_file_name( + dst.file_name() + .unwrap() + .to_str() + .unwrap() + .replace(&real_name, &crate_name), + ); + if src.exists() && src.file_name() != dst.file_name() { + fs::rename(&src, &dst) + .chain_err(|| internal(format!("could not rename crate {:?}", src)))?; + } + } + + if rustc_dep_info_loc.exists() { + fingerprint::translate_dep_info(&rustc_dep_info_loc, &dep_info_loc, &pkg_root, &cwd) + .chain_err(|| { + internal(format!( + "could not parse/generate dep info at: {}", + rustc_dep_info_loc.display() + )) + })?; + filetime::set_file_times(dep_info_loc, timestamp, timestamp)?; + } + + Ok(()) + })); + + // Add all relevant `-L` and `-l` flags from dependencies (now calculated and + // present in `state`) to the command provided. + fn add_native_deps( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + build_scripts: &BuildScripts, + pass_l_flag: bool, + current_id: PackageId, + ) -> CargoResult<()> { + for key in build_scripts.to_link.iter() { + let output = build_state.get(key).ok_or_else(|| { + internal(format!( + "couldn't find build state for {}/{:?}", + key.0, key.1 + )) + })?; + for path in output.library_paths.iter() { + rustc.arg("-L").arg(path); + } + if key.0 == current_id { + for cfg in &output.cfgs { + rustc.arg("--cfg").arg(cfg); + } + if pass_l_flag { + for name in output.library_links.iter() { + rustc.arg("-l").arg(name); + } + } + } + } + Ok(()) + } + + // Add all custom environment variables present in `state` (after they've + // been put there by one of the `build_scripts`) to the command provided. + fn add_custom_env( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + current_id: PackageId, + kind: Kind, + ) -> CargoResult<()> { + let key = (current_id, kind); + if let Some(output) = build_state.get(&key) { + for &(ref name, ref value) in output.env.iter() { + rustc.env(name, value); + } + } + Ok(()) + } +} + +/// Link the compiled target (often of form `foo-{metadata_hash}`) to the +/// final target. This must happen during both "Fresh" and "Compile". 
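+/// For a binary target `foo`, the effect is roughly (hash hypothetical):
+///
+/// ```ignore
+/// target/debug/deps/foo-1f2e3d4c5b6a7988  ->  target/debug/foo
+/// ```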
+fn link_targets<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, + fresh: bool, +) -> CargoResult { + let bcx = cx.bcx; + let outputs = cx.outputs(unit)?; + let export_dir = cx.files().export_dir(); + let package_id = unit.pkg.package_id(); + let profile = unit.profile; + let unit_mode = unit.mode; + let features = bcx + .resolve + .features_sorted(package_id) + .into_iter() + .map(|s| s.to_owned()) + .collect(); + let json_messages = bcx.build_config.json_messages(); + let executable = cx.get_executable(unit)?; + let mut target = unit.target.clone(); + if let TargetSourcePath::Metabuild = target.src_path() { + // Give it something to serialize. + let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); + target.set_src_path(TargetSourcePath::Path(path)); + } + + Ok(Work::new(move |_| { + // If we're a "root crate", e.g., the target of this compilation, then we + // hard link our outputs out of the `deps` directory into the directory + // above. This means that `cargo build` will produce binaries in + // `target/debug` which one probably expects. + let mut destinations = vec![]; + for output in outputs.iter() { + let src = &output.path; + // This may have been a `cargo rustc` command which changes the + // output, so the source may not actually exist. + if !src.exists() { + continue; + } + let dst = match output.hardlink.as_ref() { + Some(dst) => dst, + None => { + destinations.push(src.clone()); + continue; + } + }; + destinations.push(dst.clone()); + hardlink_or_copy(src, dst)?; + if let Some(ref path) = output.export_path { + let export_dir = export_dir.as_ref().unwrap(); + if !export_dir.exists() { + fs::create_dir_all(export_dir)?; + } + + hardlink_or_copy(src, path)?; + } + } + + if json_messages { + let art_profile = machine_message::ArtifactProfile { + opt_level: profile.opt_level.as_str(), + debuginfo: profile.debuginfo, + debug_assertions: profile.debug_assertions, + overflow_checks: profile.overflow_checks, + test: unit_mode.is_any_test(), + }; + + machine_message::emit(&machine_message::Artifact { + package_id, + target: &target, + profile: art_profile, + features, + filenames: destinations, + executable, + fresh, + }); + } + Ok(()) + })) +} + +fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> { + debug!("linking {} to {}", src.display(), dst.display()); + if is_same_file(src, dst).unwrap_or(false) { + return Ok(()); + } + if dst.exists() { + paths::remove_file(&dst)?; + } + + let link_result = if src.is_dir() { + #[cfg(target_os = "redox")] + use std::os::redox::fs::symlink; + #[cfg(unix)] + use std::os::unix::fs::symlink; + #[cfg(windows)] + use std::os::windows::fs::symlink_dir as symlink; + + let dst_dir = dst.parent().unwrap(); + let src = if src.starts_with(dst_dir) { + src.strip_prefix(dst_dir).unwrap() + } else { + src + }; + symlink(src, dst) + } else { + fs::hard_link(src, dst) + }; + link_result + .or_else(|err| { + debug!("link failed {}. falling back to fs::copy", err); + fs::copy(src, dst).map(|_| ()) + }) + .chain_err(|| { + format!( + "failed to link or copy `{}` to `{}`", + src.display(), + dst.display() + ) + })?; + Ok(()) +} + +fn load_build_deps(cx: &Context<'_, '_>, unit: &Unit<'_>) -> Option> { + cx.build_scripts.get(unit).cloned() +} + +// For all plugin dependencies, add their -L paths (now calculated and +// present in `state`) to the dynamic library load path for the command to +// execute. 
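+//
+// Which variable that is depends on the platform: typically LD_LIBRARY_PATH
+// on Linux, a DYLD fallback path on macOS, and PATH on Windows (whatever
+// `util::dylib_path_envvar()` reports).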
+fn add_plugin_deps( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + build_scripts: &BuildScripts, + root_output: &PathBuf, +) -> CargoResult<()> { + let var = util::dylib_path_envvar(); + let search_path = rustc.get_env(var).unwrap_or_default(); + let mut search_path = env::split_paths(&search_path).collect::>(); + for &id in build_scripts.plugins.iter() { + let output = build_state + .get(&(id, Kind::Host)) + .ok_or_else(|| internal(format!("couldn't find libs for plugin dep {}", id)))?; + search_path.append(&mut filter_dynamic_search_path( + output.library_paths.iter(), + root_output, + )); + } + let search_path = join_paths(&search_path, var)?; + rustc.env(var, &search_path); + Ok(()) +} + +// Determine paths to add to the dynamic search path from -L entries +// +// Strip off prefixes like "native=" or "framework=" and filter out directories +// **not** inside our output directory since they are likely spurious and can cause +// clashes with system shared libraries (issue #3366). +fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec +where + I: Iterator, +{ + let mut search_path = vec![]; + for dir in paths { + let dir = match dir.to_str() { + Some(s) => { + let mut parts = s.splitn(2, '='); + match (parts.next(), parts.next()) { + (Some("native"), Some(path)) + | (Some("crate"), Some(path)) + | (Some("dependency"), Some(path)) + | (Some("framework"), Some(path)) + | (Some("all"), Some(path)) => path.into(), + _ => dir.clone(), + } + } + None => dir.clone(), + }; + if dir.starts_with(&root_output) { + search_path.push(dir); + } else { + debug!( + "Not including path {} in runtime library search path because it is \ + outside target root {}", + dir.display(), + root_output.display() + ); + } + } + search_path +} + +fn prepare_rustc<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + crate_types: &[&str], + unit: &Unit<'a>, +) -> CargoResult { + let mut base = cx.compilation.rustc_process(unit.pkg, unit.target)?; + base.inherit_jobserver(&cx.jobserver); + build_base_args(cx, &mut base, unit, crate_types)?; + build_deps_args(&mut base, cx, unit)?; + Ok(base) +} + +fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { + let bcx = cx.bcx; + let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg, unit.target)?; + rustdoc.inherit_jobserver(&cx.jobserver); + rustdoc.arg("--crate-name").arg(&unit.target.crate_name()); + add_path_args(bcx, unit, &mut rustdoc); + add_cap_lints(bcx, unit, &mut rustdoc); + + let mut can_add_color_process = process(&*bcx.config.rustdoc()?); + can_add_color_process.args(&["--color", "never", "-V"]); + if bcx.rustc.cached_success(&can_add_color_process)? { + add_color(bcx, &mut rustdoc); + } + + if unit.kind != Kind::Host { + if let Some(ref target) = bcx.build_config.requested_target { + rustdoc.arg("--target").arg(target); + } + } + + let doc_dir = cx.files().out_dir(unit); + + // Create the documentation directory ahead of time as rustdoc currently has + // a bug where concurrent invocations will race to create this directory if + // it doesn't already exist. 
+ fs::create_dir_all(&doc_dir)?; + + rustdoc.arg("-o").arg(doc_dir); + + for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) { + rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); + } + + add_error_format(bcx, &mut rustdoc); + + if let Some(ref args) = bcx.extra_args_for(unit) { + rustdoc.args(args); + } + + build_deps_args(&mut rustdoc, cx, unit)?; + + rustdoc.args(&bcx.rustdocflags_args(unit)?); + + let name = unit.pkg.name().to_string(); + let build_state = cx.build_state.clone(); + let key = (unit.pkg.package_id(), unit.kind); + let json_messages = bcx.build_config.json_messages(); + let package_id = unit.pkg.package_id(); + let target = unit.target.clone(); + + Ok(Work::new(move |state| { + if let Some(output) = build_state.outputs.lock().unwrap().get(&key) { + for cfg in output.cfgs.iter() { + rustdoc.arg("--cfg").arg(cfg); + } + for &(ref name, ref value) in output.env.iter() { + rustdoc.env(name, value); + } + } + state.running(&rustdoc); + + let exec_result = if json_messages { + rustdoc + .exec_with_streaming( + &mut assert_is_empty, + &mut |line| json_stderr(line, package_id, &target), + false, + ) + .map(drop) + } else { + state.capture_output(&rustdoc, None, false).map(drop) + }; + exec_result.chain_err(|| format!("Could not document `{}`.", name))?; + Ok(()) + })) +} + +// The path that we pass to rustc is actually fairly important because it will +// show up in error messages (important for readability), debug information +// (important for caching), etc. As a result we need to be pretty careful how we +// actually invoke rustc. +// +// In general users don't expect `cargo build` to cause rebuilds if you change +// directories. That could be if you just change directories in the package or +// if you literally move the whole package wholesale to a new directory. As a +// result we mostly don't factor in `cwd` to this calculation. Instead we try to +// track the workspace as much as possible and we update the current directory +// of rustc/rustdoc where appropriate. +// +// The first returned value here is the argument to pass to rustc, and the +// second is the cwd that rustc should operate in. +fn path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>) -> (PathBuf, PathBuf) { + let ws_root = bcx.ws.root(); + let src = match unit.target.src_path() { + TargetSourcePath::Path(path) => path.to_path_buf(), + TargetSourcePath::Metabuild => unit.pkg.manifest().metabuild_path(bcx.ws.target_dir()), + }; + assert!(src.is_absolute()); + if unit.pkg.package_id().source_id().is_path() { + if let Ok(path) = src.strip_prefix(ws_root) { + return (path.to_path_buf(), ws_root.to_path_buf()); + } + } + (src, unit.pkg.root().to_path_buf()) +} + +fn add_path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { + let (arg, cwd) = path_args(bcx, unit); + cmd.arg(arg); + cmd.cwd(cwd); +} + +fn add_cap_lints(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { + // If this is an upstream dep we don't want warnings from, turn off all + // lints. + if !bcx.show_warnings(unit.pkg.package_id()) { + cmd.arg("--cap-lints").arg("allow"); + + // If this is an upstream dep but we *do* want warnings, make sure that they + // don't fail compilation. 
+ } else if !unit.pkg.package_id().source_id().is_path() { + cmd.arg("--cap-lints").arg("warn"); + } +} + +fn add_color(bcx: &BuildContext<'_, '_>, cmd: &mut ProcessBuilder) { + let shell = bcx.config.shell(); + let color = if shell.supports_color() { + "always" + } else { + "never" + }; + cmd.args(&["--color", color]); +} + +fn add_error_format(bcx: &BuildContext<'_, '_>, cmd: &mut ProcessBuilder) { + match bcx.build_config.message_format { + MessageFormat::Human => (), + MessageFormat::Json => { + cmd.arg("--error-format").arg("json"); + } + MessageFormat::Short => { + cmd.arg("--error-format").arg("short"); + } + } +} + +fn build_base_args<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + cmd: &mut ProcessBuilder, + unit: &Unit<'a>, + crate_types: &[&str], +) -> CargoResult<()> { + assert!(!unit.mode.is_run_custom_build()); + + let bcx = cx.bcx; + let Profile { + ref opt_level, + ref lto, + codegen_units, + debuginfo, + debug_assertions, + overflow_checks, + rpath, + ref panic, + incremental, + .. + } = unit.profile; + let test = unit.mode.is_any_test(); + + cmd.arg("--crate-name").arg(&unit.target.crate_name()); + + add_path_args(bcx, unit, cmd); + add_color(bcx, cmd); + add_error_format(bcx, cmd); + + if !test { + for crate_type in crate_types.iter() { + cmd.arg("--crate-type").arg(crate_type); + } + } + + if unit.mode.is_check() { + cmd.arg("--emit=dep-info,metadata"); + } else { + cmd.arg("--emit=dep-info,link"); + } + + let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build()) + || (crate_types.contains(&"dylib") && bcx.ws.members().any(|p| p != unit.pkg)); + if prefer_dynamic { + cmd.arg("-C").arg("prefer-dynamic"); + } + + if opt_level.as_str() != "0" { + cmd.arg("-C").arg(&format!("opt-level={}", opt_level)); + } + + if let Some(panic) = panic.as_ref() { + cmd.arg("-C").arg(format!("panic={}", panic)); + } + + // Disable LTO for host builds as prefer_dynamic and it are mutually + // exclusive. + if unit.target.can_lto() && !unit.target.for_host() { + match *lto { + Lto::Bool(false) => {} + Lto::Bool(true) => { + cmd.args(&["-C", "lto"]); + } + Lto::Named(ref s) => { + cmd.arg("-C").arg(format!("lto={}", s)); + } + } + } + + if let Some(n) = codegen_units { + // There are some restrictions with LTO and codegen-units, so we + // only add codegen units when LTO is not used. + cmd.arg("-C").arg(&format!("codegen-units={}", n)); + } + + if let Some(debuginfo) = debuginfo { + cmd.arg("-C").arg(format!("debuginfo={}", debuginfo)); + } + + if let Some(ref args) = bcx.extra_args_for(unit) { + cmd.args(args); + } + + // `-C overflow-checks` is implied by the setting of `-C debug-assertions`, + // so we only need to provide `-C overflow-checks` if it differs from + // the value of `-C debug-assertions` we would provide. + if opt_level.as_str() != "0" { + if debug_assertions { + cmd.args(&["-C", "debug-assertions=on"]); + if !overflow_checks { + cmd.args(&["-C", "overflow-checks=off"]); + } + } else if overflow_checks { + cmd.args(&["-C", "overflow-checks=on"]); + } + } else if !debug_assertions { + cmd.args(&["-C", "debug-assertions=off"]); + if overflow_checks { + cmd.args(&["-C", "overflow-checks=on"]); + } + } else if !overflow_checks { + cmd.args(&["-C", "overflow-checks=off"]); + } + + if test && unit.target.harness() { + cmd.arg("--test"); + } else if test { + cmd.arg("--cfg").arg("test"); + } + + // We ideally want deterministic invocations of rustc to ensure that + // rustc-caching strategies like sccache are able to cache more, so sort the + // feature list here. 
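+    // e.g. features ["serde", "alloc"] always yield, in this order:
+    //
+    //     --cfg feature="alloc" --cfg feature="serde"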
+ for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) { + cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); + } + + match cx.files().metadata(unit) { + Some(m) => { + cmd.arg("-C").arg(&format!("metadata={}", m)); + cmd.arg("-C").arg(&format!("extra-filename=-{}", m)); + } + None => { + cmd.arg("-C") + .arg(&format!("metadata={}", cx.files().target_short_hash(unit))); + } + } + + if rpath { + cmd.arg("-C").arg("rpath"); + } + + cmd.arg("--out-dir").arg(&cx.files().out_dir(unit)); + + fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str, val: Option<&OsStr>) { + if let Some(val) = val { + let mut joined = OsString::from(prefix); + joined.push(val); + cmd.arg(key).arg(joined); + } + } + + if unit.kind == Kind::Target { + opt( + cmd, + "--target", + "", + bcx.build_config + .requested_target + .as_ref() + .map(|s| s.as_ref()), + ); + } + + opt(cmd, "-C", "ar=", bcx.ar(unit.kind).map(|s| s.as_ref())); + opt( + cmd, + "-C", + "linker=", + bcx.linker(unit.kind).map(|s| s.as_ref()), + ); + if incremental { + let dir = cx.files().layout(unit.kind).incremental().as_os_str(); + opt(cmd, "-C", "incremental=", Some(dir)); + } + Ok(()) +} + +fn build_deps_args<'a, 'cfg>( + cmd: &mut ProcessBuilder, + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> CargoResult<()> { + let bcx = cx.bcx; + cmd.arg("-L").arg(&{ + let mut deps = OsString::from("dependency="); + deps.push(cx.files().deps_dir(unit)); + deps + }); + + // Be sure that the host path is also listed. This'll ensure that proc macro + // dependencies are correctly found (for reexported macros). + if let Kind::Target = unit.kind { + cmd.arg("-L").arg(&{ + let mut deps = OsString::from("dependency="); + deps.push(cx.files().host_deps()); + deps + }); + } + + let dep_targets = cx.dep_targets(unit); + + // If there is not one linkable target but should, rustc fails later + // on if there is an `extern crate` for it. This may turn into a hard + // error in the future (see PR #4797). + if !dep_targets + .iter() + .any(|u| !u.mode.is_doc() && u.target.linkable()) + { + if let Some(u) = dep_targets + .iter() + .find(|u| !u.mode.is_doc() && u.target.is_lib()) + { + bcx.config.shell().warn(format!( + "The package `{}` \ + provides no linkable target. The compiler might raise an error while compiling \ + `{}`. Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \ + Cargo.toml. 
This warning might turn into a hard error in the future.", + u.target.crate_name(), + unit.target.crate_name(), + u.target.crate_name() + ))?; + } + } + + for dep in dep_targets { + if dep.mode.is_run_custom_build() { + cmd.env("OUT_DIR", &cx.files().build_script_out_dir(&dep)); + } + if dep.target.linkable() && !dep.mode.is_doc() { + link_to(cmd, cx, unit, &dep)?; + } + } + + return Ok(()); + + fn link_to<'a, 'cfg>( + cmd: &mut ProcessBuilder, + cx: &mut Context<'a, 'cfg>, + current: &Unit<'a>, + dep: &Unit<'a>, + ) -> CargoResult<()> { + let bcx = cx.bcx; + for output in cx.outputs(dep)?.iter() { + if output.flavor != FileFlavor::Linkable { + continue; + } + let mut v = OsString::new(); + let name = bcx.extern_crate_name(current, dep)?; + v.push(name); + v.push("="); + v.push(cx.files().out_dir(dep)); + v.push(&path::MAIN_SEPARATOR.to_string()); + v.push(&output.path.file_name().unwrap()); + cmd.arg("--extern").arg(&v); + } + Ok(()) + } +} + +fn envify(s: &str) -> String { + s.chars() + .flat_map(|c| c.to_uppercase()) + .map(|c| if c == '-' { '_' } else { c }) + .collect() +} + +impl Kind { + fn for_target(self, target: &Target) -> Kind { + // Once we start compiling for the `Host` kind we continue doing so, but + // if we are a `Target` kind and then we start compiling for a target + // that needs to be on the host we lift ourselves up to `Host`. + match self { + Kind::Host => Kind::Host, + Kind::Target if target.for_host() => Kind::Host, + Kind::Target => Kind::Target, + } + } +} + +fn assert_is_empty(line: &str) -> CargoResult<()> { + if !line.is_empty() { + Err(internal(&format!( + "compiler stdout is not empty: `{}`", + line + ))) + } else { + Ok(()) + } +} + +fn json_stderr(line: &str, package_id: PackageId, target: &Target) -> CargoResult<()> { + // Stderr from rustc/rustdoc can have a mix of JSON and non-JSON output. + if line.starts_with('{') { + // Handle JSON lines. + let compiler_message = serde_json::from_str(line) + .map_err(|_| internal(&format!("compiler produced invalid json: `{}`", line)))?; + + machine_message::emit(&machine_message::FromCompiler { + package_id, + target, + message: compiler_message, + }); + } else { + // Forward non-JSON to stderr. 
+        writeln!(io::stderr(), "{}", line)?;
+    }
+    Ok(())
+}
diff --git a/src/cargo/core/compiler/output_depinfo.rs b/src/cargo/core/compiler/output_depinfo.rs
new file mode 100644
index 000000000..1c192db86
--- /dev/null
+++ b/src/cargo/core/compiler/output_depinfo.rs
@@ -0,0 +1,128 @@
+use std::collections::{BTreeSet, HashSet};
+use std::fs::File;
+use std::io::{BufWriter, Write};
+use std::path::{Path, PathBuf};
+
+use log::debug;
+
+use super::{fingerprint, Context, Unit};
+use crate::util::paths;
+use crate::util::{internal, CargoResult};
+
+fn render_filename<P: AsRef<Path>>(path: P, basedir: Option<&str>) -> CargoResult<String> {
+    let path = path.as_ref();
+    let relpath = match basedir {
+        None => path,
+        Some(base) => match path.strip_prefix(base) {
+            Ok(relpath) => relpath,
+            _ => path,
+        },
+    };
+    relpath
+        .to_str()
+        .ok_or_else(|| internal("path not utf-8"))
+        .map(|f| f.replace(" ", "\\ "))
+}
+
+fn add_deps_for_unit<'a, 'b>(
+    deps: &mut BTreeSet<PathBuf>,
+    context: &mut Context<'a, 'b>,
+    unit: &Unit<'a>,
+    visited: &mut HashSet<Unit<'a>>,
+) -> CargoResult<()> {
+    if !visited.insert(*unit) {
+        return Ok(());
+    }
+
+    // units representing the execution of a build script don't actually
+    // generate a dep info file, so we just keep on going below
+    if !unit.mode.is_run_custom_build() {
+        // Add dependencies from rustc dep-info output (stored in fingerprint directory)
+        let dep_info_loc = fingerprint::dep_info_loc(context, unit);
+        if let Some(paths) = fingerprint::parse_dep_info(unit.pkg, &dep_info_loc)? {
+            for path in paths {
+                deps.insert(path);
+            }
+        } else {
+            debug!(
+                "can't find dep_info for {:?} {}",
+                unit.pkg.package_id(),
+                unit.target
+            );
+            return Err(internal("dep_info missing"));
+        }
+    }
+
+    // Add rerun-if-changed dependencies
+    let key = (unit.pkg.package_id(), unit.kind);
+    if let Some(output) = context.build_state.outputs.lock().unwrap().get(&key) {
+        for path in &output.rerun_if_changed {
+            deps.insert(path.into());
+        }
+    }
+
+    // Recursively traverse all transitive dependencies
+    for dep_unit in context.dep_targets(unit).iter() {
+        let source_id = dep_unit.pkg.package_id().source_id();
+        if source_id.is_path() {
+            add_deps_for_unit(deps, context, dep_unit, visited)?;
+        }
+    }
+    Ok(())
+}
+
+pub fn output_depinfo<'a, 'b>(cx: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
+    let bcx = cx.bcx;
+    let mut deps = BTreeSet::new();
+    let mut visited = HashSet::new();
+    let success = add_deps_for_unit(&mut deps, cx, unit, &mut visited).is_ok();
+    let basedir_string;
+    let basedir = match bcx.config.get_path("build.dep-info-basedir")? {
+        Some(value) => {
+            basedir_string = value
+                .val
+                .as_os_str()
+                .to_str()
+                .ok_or_else(|| internal("build.dep-info-basedir path not utf-8"))?
+                .to_string();
+            Some(basedir_string.as_str())
+        }
+        None => None,
+    };
+    let deps = deps
+        .iter()
+        .map(|f| render_filename(f, basedir))
+        .collect::<CargoResult<Vec<_>>>()?;
+
+    for output in cx.outputs(unit)?.iter() {
+        if let Some(ref link_dst) = output.hardlink {
+            let output_path = link_dst.with_extension("d");
+            if success {
+                let target_fn = render_filename(link_dst, basedir)?;
+
+                // If nothing changed, don't recreate the file, which could
+                // alter its mtime.
+                if let Ok(previous) = fingerprint::parse_rustc_dep_info(&output_path) {
+                    if previous.len() == 1 && previous[0].0 == target_fn && previous[0].1 == deps {
+                        continue;
+                    }
+                }
+
+                // Otherwise write it all out
+                let mut outfile = BufWriter::new(File::create(output_path)?);
+                write!(outfile, "{}:", target_fn)?;
+                for dep in &deps {
+                    write!(outfile, " {}", dep)?;
+                }
+                writeln!(outfile)?;
+
+            // dep-info generation failed, so delete output file. This will
+            // usually cause the build system to always rerun the build
+            // rule, which is correct if inefficient.
+            } else if output_path.exists() {
+                paths::remove_file(output_path)?;
+            }
+        }
+    }
+    Ok(())
+}
diff --git a/src/cargo/core/dependency.rs b/src/cargo/core/dependency.rs
new file mode 100644
index 000000000..e2a27e043
--- /dev/null
+++ b/src/cargo/core/dependency.rs
@@ -0,0 +1,486 @@
+use std::fmt;
+use std::rc::Rc;
+use std::str::FromStr;
+
+use log::trace;
+use semver::ReqParseError;
+use semver::VersionReq;
+use serde::ser;
+use serde::Serialize;
+use url::Url;
+
+use crate::core::interning::InternedString;
+use crate::core::{PackageId, SourceId, Summary};
+use crate::util::errors::{CargoResult, CargoResultExt};
+use crate::util::{Cfg, CfgExpr, Config};
+
+/// Information about a dependency requested by a Cargo manifest.
+/// Cheap to copy.
+#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
+pub struct Dependency {
+    inner: Rc<Inner>,
+}
+
+/// The data underlying a `Dependency`.
+#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)]
+struct Inner {
+    name: InternedString,
+    source_id: SourceId,
+    /// Source ID for the registry as specified in the manifest.
+    ///
+    /// This will be `None` if it is not specified (a crates.io dependency).
+    /// This is different from `source_id`, for example, when both a `path` and
+    /// a `registry` are specified. Or in the case of a crates.io dependency,
+    /// `source_id` will be crates.io and this will be `None`.
+    registry_id: Option<SourceId>,
+    req: VersionReq,
+    specified_req: bool,
+    kind: Kind,
+    only_match_name: bool,
+    explicit_name_in_toml: Option<InternedString>,
+
+    optional: bool,
+    default_features: bool,
+    features: Vec<InternedString>,
+
+    // This dependency should be used only for this platform.
+    // `None` means *all platforms*.
+    platform: Option<Platform>,
+}
+
+#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)]
+pub enum Platform {
+    Name(String),
+    Cfg(CfgExpr),
+}
+
+#[derive(Serialize)]
+struct SerializedDependency<'a> {
+    name: &'a str,
+    source: SourceId,
+    req: String,
+    kind: Kind,
+    rename: Option<&'a str>,
+
+    optional: bool,
+    uses_default_features: bool,
+    features: &'a [InternedString],
+    target: Option<&'a Platform>,
+    /// The registry URL this dependency is from.
+    /// If `None`, then it comes from the default registry (crates.io).
+ #[serde(with = "url_serde")] + registry: Option, +} + +impl ser::Serialize for Dependency { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + SerializedDependency { + name: &*self.package_name(), + source: self.source_id(), + req: self.version_req().to_string(), + kind: self.kind(), + optional: self.is_optional(), + uses_default_features: self.uses_default_features(), + features: self.features(), + target: self.platform(), + rename: self.explicit_name_in_toml().map(|s| s.as_str()), + registry: self.registry_id().map(|sid| sid.url().clone()), + } + .serialize(s) + } +} + +#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)] +pub enum Kind { + Normal, + Development, + Build, +} + +fn parse_req_with_deprecated( + name: &str, + req: &str, + extra: Option<(PackageId, &Config)>, +) -> CargoResult { + match VersionReq::parse(req) { + Err(ReqParseError::DeprecatedVersionRequirement(requirement)) => { + let (inside, config) = match extra { + Some(pair) => pair, + None => return Err(ReqParseError::DeprecatedVersionRequirement(requirement).into()), + }; + let msg = format!( + "\ +parsed version requirement `{}` is no longer valid + +Previous versions of Cargo accepted this malformed requirement, +but it is being deprecated. This was found when parsing the manifest +of {} {}, and the correct version requirement is `{}`. + +This will soon become a hard error, so it's either recommended to +update to a fixed version or contact the upstream maintainer about +this warning. +", + req, + inside.name(), + inside.version(), + requirement + ); + config.shell().warn(&msg)?; + + Ok(requirement) + } + Err(e) => { + let err: CargoResult = Err(e.into()); + let v: VersionReq = err.chain_err(|| { + format!( + "failed to parse the version requirement `{}` for dependency `{}`", + req, name + ) + })?; + Ok(v) + } + Ok(v) => Ok(v), + } +} + +impl ser::Serialize for Kind { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + match *self { + Kind::Normal => None, + Kind::Development => Some("dev"), + Kind::Build => Some("build"), + } + .serialize(s) + } +} + +impl Dependency { + /// Attempt to create a `Dependency` from an entry in the manifest. + pub fn parse( + name: &str, + version: Option<&str>, + source_id: SourceId, + inside: PackageId, + config: &Config, + ) -> CargoResult { + let arg = Some((inside, config)); + let (specified_req, version_req) = match version { + Some(v) => (true, parse_req_with_deprecated(name, v, arg)?), + None => (false, VersionReq::any()), + }; + + let mut ret = Dependency::new_override(name, source_id); + { + let ptr = Rc::make_mut(&mut ret.inner); + ptr.only_match_name = false; + ptr.req = version_req; + ptr.specified_req = specified_req; + } + Ok(ret) + } + + /// Attempt to create a `Dependency` from an entry in the manifest. 
+    pub fn parse_no_deprecated(
+        name: &str,
+        version: Option<&str>,
+        source_id: SourceId,
+    ) -> CargoResult<Dependency> {
+        let (specified_req, version_req) = match version {
+            Some(v) => (true, parse_req_with_deprecated(name, v, None)?),
+            None => (false, VersionReq::any()),
+        };
+
+        let mut ret = Dependency::new_override(name, source_id);
+        {
+            let ptr = Rc::make_mut(&mut ret.inner);
+            ptr.only_match_name = false;
+            ptr.req = version_req;
+            ptr.specified_req = specified_req;
+        }
+        Ok(ret)
+    }
+
+    pub fn new_override(name: &str, source_id: SourceId) -> Dependency {
+        assert!(!name.is_empty());
+        Dependency {
+            inner: Rc::new(Inner {
+                name: InternedString::new(name),
+                source_id,
+                registry_id: None,
+                req: VersionReq::any(),
+                kind: Kind::Normal,
+                only_match_name: true,
+                optional: false,
+                features: Vec::new(),
+                default_features: true,
+                specified_req: false,
+                platform: None,
+                explicit_name_in_toml: None,
+            }),
+        }
+    }
+
+    pub fn version_req(&self) -> &VersionReq {
+        &self.inner.req
+    }
+
+    /// This is the name of this `Dependency` as listed in `Cargo.toml`.
+    ///
+    /// Or in other words, this is what shows up in the `[dependencies]` section
+    /// on the left hand side. This is *not* the name of the package that's
+    /// being depended on as the dependency can be renamed. For that, use
+    /// `package_name` below.
+    ///
+    /// Both of the dependencies below return `foo` for `name_in_toml`:
+    ///
+    /// ```toml
+    /// [dependencies]
+    /// foo = "0.1"
+    /// ```
+    ///
+    /// and ...
+    ///
+    /// ```toml
+    /// [dependencies]
+    /// foo = { version = "0.1", package = 'bar' }
+    /// ```
+    pub fn name_in_toml(&self) -> InternedString {
+        self.explicit_name_in_toml().unwrap_or(self.inner.name)
+    }
+
+    /// The name of the package that this `Dependency` depends on.
+    ///
+    /// Usually this is what's written on the left hand side of a dependencies
+    /// section, but it can also be renamed via the `package` key.
+    ///
+    /// Both of the dependencies below return `foo` for `package_name`:
+    ///
+    /// ```toml
+    /// [dependencies]
+    /// foo = "0.1"
+    /// ```
+    ///
+    /// and ...
+    ///
+    /// ```toml
+    /// [dependencies]
+    /// bar = { version = "0.1", package = 'foo' }
+    /// ```
+    pub fn package_name(&self) -> InternedString {
+        self.inner.name
+    }
+
+    pub fn source_id(&self) -> SourceId {
+        self.inner.source_id
+    }
+
+    pub fn registry_id(&self) -> Option<SourceId> {
+        self.inner.registry_id
+    }
+
+    pub fn set_registry_id(&mut self, registry_id: SourceId) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).registry_id = Some(registry_id);
+        self
+    }
+
+    pub fn kind(&self) -> Kind {
+        self.inner.kind
+    }
+
+    pub fn specified_req(&self) -> bool {
+        self.inner.specified_req
+    }
+
+    /// If `None`, this dependency must be built for all platforms.
+    /// If `Some`, it must only be built for the specified platform.
+    pub fn platform(&self) -> Option<&Platform> {
+        self.inner.platform.as_ref()
+    }
+
+    /// The renamed name of this dependency, if any.
+    ///
+    /// If the `package` key is used in `Cargo.toml` then this returns the same
+    /// value as `name_in_toml`.
+    pub fn explicit_name_in_toml(&self) -> Option<InternedString> {
+        self.inner.explicit_name_in_toml
+    }
+
+    pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).kind = kind;
+        self
+    }
+
+    /// Sets the list of features requested for the package.
+    pub fn set_features(
+        &mut self,
+        features: impl IntoIterator<Item = impl AsRef<str>>,
+    ) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).features = features
+            .into_iter()
+            .map(|s| InternedString::new(s.as_ref()))
+            .collect();
+        self
+    }
+
+    /// Sets whether the dependency requests default features of the package.
+    pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).default_features = default_features;
+        self
+    }
+
+    /// Sets whether the dependency is optional.
+    pub fn set_optional(&mut self, optional: bool) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).optional = optional;
+        self
+    }
+
+    /// Sets the source ID for this dependency.
+    pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).source_id = id;
+        self
+    }
+
+    /// Sets the version requirement for this dependency.
+    pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).req = req;
+        self
+    }
+
+    pub fn set_platform(&mut self, platform: Option<Platform>) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).platform = platform;
+        self
+    }
+
+    pub fn set_explicit_name_in_toml(&mut self, name: &str) -> &mut Dependency {
+        Rc::make_mut(&mut self.inner).explicit_name_in_toml = Some(InternedString::new(name));
+        self
+    }
+
+    /// Locks this dependency to depending on the specified package ID.
+    pub fn lock_to(&mut self, id: PackageId) -> &mut Dependency {
+        assert_eq!(self.inner.source_id, id.source_id());
+        assert!(self.inner.req.matches(id.version()));
+        trace!(
+            "locking dep from `{}` with `{}` at {} to {}",
+            self.package_name(),
+            self.version_req(),
+            self.source_id(),
+            id
+        );
+        self.set_version_req(VersionReq::exact(id.version()))
+            .set_source_id(id.source_id())
+    }
+
+    /// Returns `true` if this is a "locked" dependency, basically whether it has
+    /// an exact version req.
+    pub fn is_locked(&self) -> bool {
+        // Kind of a hack to figure this out, but it works!
+        self.inner.req.to_string().starts_with('=')
+    }
+
+    /// Returns `false` if the dependency is only used to build the local package.
+    pub fn is_transitive(&self) -> bool {
+        match self.inner.kind {
+            Kind::Normal | Kind::Build => true,
+            Kind::Development => false,
+        }
+    }
+
+    pub fn is_build(&self) -> bool {
+        match self.inner.kind {
+            Kind::Build => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_optional(&self) -> bool {
+        self.inner.optional
+    }
+
+    /// Returns `true` if the default features of the dependency are requested.
+    pub fn uses_default_features(&self) -> bool {
+        self.inner.default_features
+    }
+
+    /// Returns the list of features that are requested by the dependency.
+    pub fn features(&self) -> &[InternedString] {
+        &self.inner.features
+    }
+
+    /// Returns `true` if the package (`sum`) can fulfill this dependency request.
+    pub fn matches(&self, sum: &Summary) -> bool {
+        self.matches_id(sum.package_id())
+    }
+
+    /// Returns `true` if the package (`id`) can fulfill this dependency
+    /// request, ignoring the source.
+    pub fn matches_ignoring_source(&self, id: PackageId) -> bool {
+        self.package_name() == id.name() && self.version_req().matches(id.version())
+    }
+
+    /// Returns `true` if the package (`id`) can fulfill this dependency request.
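+    ///
+    /// (An override dependency created via `new_override` matches on name
+    /// alone; otherwise the version requirement and source ID must match too.)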
+    pub fn matches_id(&self, id: PackageId) -> bool {
+        self.inner.name == id.name()
+            && (self.inner.only_match_name
+                || (self.inner.req.matches(id.version()) && self.inner.source_id == id.source_id()))
+    }
+
+    pub fn map_source(mut self, to_replace: SourceId, replace_with: SourceId) -> Dependency {
+        if self.source_id() != to_replace {
+            self
+        } else {
+            self.set_source_id(replace_with);
+            self
+        }
+    }
+}
+
+impl Platform {
+    pub fn matches(&self, name: &str, cfg: Option<&[Cfg]>) -> bool {
+        match *self {
+            Platform::Name(ref p) => p == name,
+            Platform::Cfg(ref p) => match cfg {
+                Some(cfg) => p.matches(cfg),
+                None => false,
+            },
+        }
+    }
+}
+
+impl ser::Serialize for Platform {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        self.to_string().serialize(s)
+    }
+}
+
+impl FromStr for Platform {
+    type Err = failure::Error;
+
+    fn from_str(s: &str) -> CargoResult<Platform> {
+        if s.starts_with("cfg(") && s.ends_with(')') {
+            let s = &s[4..s.len() - 1];
+            let p = s.parse().map(Platform::Cfg).chain_err(|| {
+                failure::format_err!("failed to parse `{}` as a cfg expression", s)
+            })?;
+            Ok(p)
+        } else {
+            Ok(Platform::Name(s.to_string()))
+        }
+    }
+}
+
+impl fmt::Display for Platform {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            Platform::Name(ref n) => n.fmt(f),
+            Platform::Cfg(ref e) => write!(f, "cfg({})", e),
+        }
+    }
+}
diff --git a/src/cargo/core/features.rs b/src/cargo/core/features.rs
new file mode 100644
index 000000000..4daed85d7
--- /dev/null
+++ b/src/cargo/core/features.rs
@@ -0,0 +1,426 @@
+//! Support for nightly features in Cargo itself.
+//!
+//! This file is the version of `feature_gate.rs` in upstream Rust for Cargo
+//! itself and is intended to be the avenue through which new features in Cargo
+//! are gated by default and then eventually stabilized. All known stable and
+//! unstable features are tracked in this file.
+//!
+//! If you're reading this then you're likely interested in adding a feature to
+//! Cargo, and the good news is that it shouldn't be too hard! To do this you'll
+//! want to follow these steps:
+//!
+//! 1. Add your feature. Do this by searching for "look here" in this file and
+//!    expanding the macro invocation that lists all features with your new
+//!    feature.
+//!
+//! 2. Find the appropriate place to place the feature gate in Cargo itself. If
+//!    you're extending the manifest format you'll likely just want to modify
+//!    the `Manifest::feature_gate` function, but otherwise you may wish to
+//!    place the feature gate elsewhere in Cargo.
+//!
+//! 3. To actually perform the feature gate, you'll want to have code that looks
+//!    like:
+//!
+//! ```rust,ignore
+//! use core::{Feature, Features};
+//!
+//! let feature = Feature::launch_into_space();
+//! package.manifest().features().require(feature).chain_err(|| {
+//!     "launching Cargo into space right now is unstable and may result in \
+//!      unintended damage to your codebase, use with caution"
+//! })?;
+//! ```
+//!
+//! Notably you'll notice the `require` function called with your `Feature`, and
+//! then you use `chain_err` to tack on more context for why the feature was
+//! required when the feature isn't activated.
+//!
+//! 4. Update the unstable documentation at
+//!    `src/doc/src/reference/unstable.md` to include a short description of
+//!    how to use your new feature. When the feature is stabilized, be sure
+//!    that the Cargo Guide or Reference is updated to fully document the
+//!    feature and remove the entry from the Unstable section.
+//!
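+//! As a sketch of step 1, the new entry in the `features!` invocation below
+//! (reusing the hypothetical feature from the example above) would be:
+//!
+//! ```rust,ignore
+//! [unstable] launch_into_space: bool,
+//! ```
+//!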
+//! And hopefully that's it! Bear with us though that this is, at the time of
+//! this writing, a very new feature in Cargo. If the process differs from this
+//! we'll be sure to update this documentation!
+
+use std::cell::Cell;
+use std::env;
+use std::fmt;
+use std::str::FromStr;
+
+use failure::Error;
+use serde::{Deserialize, Serialize};
+
+use crate::util::errors::CargoResult;
+
+/// The edition of the compiler (RFC 2052)
+#[derive(Clone, Copy, Debug, Hash, PartialOrd, Ord, Eq, PartialEq, Serialize, Deserialize)]
+pub enum Edition {
+    /// The 2015 edition
+    Edition2015,
+    /// The 2018 edition
+    Edition2018,
+}
+
+impl fmt::Display for Edition {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            Edition::Edition2015 => f.write_str("2015"),
+            Edition::Edition2018 => f.write_str("2018"),
+        }
+    }
+}
+
+impl FromStr for Edition {
+    type Err = Error;
+    fn from_str(s: &str) -> Result<Edition, Error> {
+        match s {
+            "2015" => Ok(Edition::Edition2015),
+            "2018" => Ok(Edition::Edition2018),
+            s => failure::bail!(
+                "supported edition values are `2015` or `2018`, but `{}` \
+                 is unknown",
+                s
+            ),
+        }
+    }
+}
+
+#[derive(PartialEq)]
+enum Status {
+    Stable,
+    Unstable,
+}
+
+macro_rules! features {
+    (
+        pub struct Features {
+            $([$stab:ident] $feature:ident: bool,)*
+        }
+    ) => (
+        #[derive(Default, Clone, Debug)]
+        pub struct Features {
+            $($feature: bool,)*
+            activated: Vec<String>,
+        }
+
+        impl Feature {
+            $(
+                pub fn $feature() -> &'static Feature {
+                    fn get(features: &Features) -> bool {
+                        stab!($stab) == Status::Stable || features.$feature
+                    }
+                    static FEAT: Feature = Feature {
+                        name: stringify!($feature),
+                        get,
+                    };
+                    &FEAT
+                }
+            )*
+
+            fn is_enabled(&self, features: &Features) -> bool {
+                (self.get)(features)
+            }
+        }
+
+        impl Features {
+            fn status(&mut self, feature: &str) -> Option<(&mut bool, Status)> {
+                if feature.contains("_") {
+                    return None
+                }
+                let feature = feature.replace("-", "_");
+                $(
+                    if feature == stringify!($feature) {
+                        return Some((&mut self.$feature, stab!($stab)))
+                    }
+                )*
+                None
+            }
+        }
+    )
+}
+
+macro_rules! stab {
+    (stable) => {
+        Status::Stable
+    };
+    (unstable) => {
+        Status::Unstable
+    };
+}
+
+// A listing of all features in Cargo.
+//
+// "look here"
+//
+// This is the macro that lists all stable and unstable features in Cargo.
+// You'll want to add to this macro whenever you add a feature to Cargo, also
+// following the directions above.
+//
+// Note that all feature names here are valid Rust identifiers, but the `_`
+// character is translated to `-` when specified in the `cargo-features`
+// manifest entry in `Cargo.toml`.
+features! {
+    pub struct Features {
+
+        // A dummy feature that doesn't actually gate anything, but it's used in
+        // testing to ensure that we can enable stable features.
+        [stable] test_dummy_stable: bool,
+
+        // A dummy feature that gates the usage of the `im-a-teapot` manifest
+        // entry. This is basically just intended for tests.
+        [unstable] test_dummy_unstable: bool,
+
+        // Downloading packages from alternative registry indexes.
+        [stable] alternative_registries: bool,
+
+        // Using editions
+        [stable] edition: bool,
+
+        // Renaming a package in the manifest via the `package` key
+        [stable] rename_dependency: bool,
+
+        // Whether a lock file is published with this crate
+        [unstable] publish_lockfile: bool,
+
+        // Overriding profiles for dependencies.
+        [unstable] profile_overrides: bool,
+
+        // Separating the namespaces for features and dependencies
+        [unstable] namespaced_features: bool,
+
+        // "default-run" manifest option,
+        [unstable] default_run: bool,
+
+        // Declarative build scripts.
+        [unstable] metabuild: bool,
+    }
+}
+
+pub struct Feature {
+    name: &'static str,
+    get: fn(&Features) -> bool,
+}
+
+impl Features {
+    pub fn new(features: &[String], warnings: &mut Vec<String>) -> CargoResult<Features> {
+        let mut ret = Features::default();
+        for feature in features {
+            ret.add(feature, warnings)?;
+            ret.activated.push(feature.to_string());
+        }
+        Ok(ret)
+    }
+
+    fn add(&mut self, feature: &str, warnings: &mut Vec<String>) -> CargoResult<()> {
+        let (slot, status) = match self.status(feature) {
+            Some(p) => p,
+            None => failure::bail!("unknown cargo feature `{}`", feature),
+        };
+
+        if *slot {
+            failure::bail!("the cargo feature `{}` has already been activated", feature);
+        }
+
+        match status {
+            Status::Stable => {
+                let warning = format!(
+                    "the cargo feature `{}` is now stable \
+                     and is no longer necessary to be listed \
+                     in the manifest",
+                    feature
+                );
+                warnings.push(warning);
+            }
+            Status::Unstable if !nightly_features_allowed() => failure::bail!(
+                "the cargo feature `{}` requires a nightly version of \
+                 Cargo, but this is the `{}` channel",
+                feature,
+                channel()
+            ),
+            Status::Unstable => {}
+        }
+
+        *slot = true;
+
+        Ok(())
+    }
+
+    pub fn activated(&self) -> &[String] {
+        &self.activated
+    }
+
+    pub fn require(&self, feature: &Feature) -> CargoResult<()> {
+        if feature.is_enabled(self) {
+            Ok(())
+        } else {
+            let feature = feature.name.replace("_", "-");
+            let mut msg = format!("feature `{}` is required", feature);
+
+            if nightly_features_allowed() {
+                let s = format!(
+                    "\n\nconsider adding `cargo-features = [\"{0}\"]` \
+                     to the manifest",
+                    feature
+                );
+                msg.push_str(&s);
+            } else {
+                let s = format!(
+                    "\n\n\
+                     this Cargo does not support nightly features, but if you\n\
+                     switch to nightly channel you can add\n\
+                     `cargo-features = [\"{}\"]` to enable this feature",
+                    feature
+                );
+                msg.push_str(&s);
+            }
+            failure::bail!("{}", msg);
+        }
+    }
+
+    pub fn is_enabled(&self, feature: &Feature) -> bool {
+        feature.is_enabled(self)
+    }
+}
+
+/// A parsed representation of all unstable flags that Cargo accepts.
+///
+/// Cargo, like `rustc`, accepts a suite of `-Z` flags which are intended for
+/// gating unstable functionality to Cargo. These flags are only available on
+/// the nightly channel of Cargo.
+///
+/// This struct doesn't have quite the same convenience macro that the features
+/// have above, but the procedure should still be relatively stable for adding a
+/// new unstable flag:
+///
+/// 1. First, add a field to this `CliUnstable` structure. All flags are allowed
+///    to have a value as the `-Z` flags are either of the form `-Z foo` or
+///    `-Z foo=bar`, and it's up to you how to parse `bar`.
+///
+/// 2. Add an arm to the match statement in `CliUnstable::add` below to match on
+///    your new flag. The key (`k`) is what you're matching on and the value is
+///    in `v`.
+///
+/// 3. (optional) Add a new parsing function to parse your datatype. As of now
+///    there's an example for `bool`, but more can be added!
+///
+/// 4. In Cargo use `config.cli_unstable()` to get a reference to this structure
+///    and then test for your flag or your value and act accordingly.
+///
+/// If you have any trouble with this, please let us know!
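+///
+/// As a sketch (using a hypothetical flag name), steps 1 and 2 together look
+/// like:
+///
+/// ```rust,ignore
+/// pub struct CliUnstable {
+///     // step 1: the new field
+///     pub my_new_flag: bool,
+///     // ...
+/// }
+///
+/// // step 2: the new arm in `CliUnstable::add`
+/// "my-new-flag" => self.my_new_flag = parse_bool(v)?,
+/// ```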
+#[derive(Default, Debug)]
+pub struct CliUnstable {
+    pub print_im_a_teapot: bool,
+    pub unstable_options: bool,
+    pub offline: bool,
+    pub no_index_update: bool,
+    pub avoid_dev_deps: bool,
+    pub minimal_versions: bool,
+    pub package_features: bool,
+    pub advanced_env: bool,
+    pub config_profile: bool,
+    pub dual_proc_macros: bool,
+    pub mtime_on_use: bool,
+}
+
+impl CliUnstable {
+    pub fn parse(&mut self, flags: &[String]) -> CargoResult<()> {
+        if !flags.is_empty() && !nightly_features_allowed() {
+            failure::bail!("the `-Z` flag is only accepted on the nightly channel of Cargo")
+        }
+        for flag in flags {
+            self.add(flag)?;
+        }
+        Ok(())
+    }
+
+    fn add(&mut self, flag: &str) -> CargoResult<()> {
+        let mut parts = flag.splitn(2, '=');
+        let k = parts.next().unwrap();
+        let v = parts.next();
+
+        fn parse_bool(value: Option<&str>) -> CargoResult<bool> {
+            match value {
+                None | Some("yes") => Ok(true),
+                Some("no") => Ok(false),
+                Some(s) => failure::bail!("expected `no` or `yes`, found: {}", s),
+            }
+        }
+
+        match k {
+            "print-im-a-teapot" => self.print_im_a_teapot = parse_bool(v)?,
+            "unstable-options" => self.unstable_options = true,
+            "offline" => self.offline = true,
+            "no-index-update" => self.no_index_update = true,
+            "avoid-dev-deps" => self.avoid_dev_deps = true,
+            "minimal-versions" => self.minimal_versions = true,
+            "package-features" => self.package_features = true,
+            "advanced-env" => self.advanced_env = true,
+            "config-profile" => self.config_profile = true,
+            "dual-proc-macros" => self.dual_proc_macros = true,
+            "mtime-on-use" => self.mtime_on_use = true,
+            _ => failure::bail!("unknown `-Z` flag specified: {}", k),
+        }
+
+        Ok(())
+    }
+}
+
+fn channel() -> String {
+    if let Ok(override_channel) = env::var("__CARGO_TEST_CHANNEL_OVERRIDE_DO_NOT_USE_THIS") {
+        return override_channel;
+    }
+    if let Ok(staging) = env::var("RUSTC_BOOTSTRAP") {
+        if staging == "1" {
+            return "dev".to_string();
+        }
+    }
+    crate::version()
+        .cfg_info
+        .map(|c| c.release_channel)
+        .unwrap_or_else(|| String::from("dev"))
+}
+
+thread_local!(
+    static NIGHTLY_FEATURES_ALLOWED: Cell<bool> = Cell::new(false);
+    static ENABLE_NIGHTLY_FEATURES: Cell<bool> = Cell::new(false);
+);
+
+/// This is a little complicated.
+/// This should return `false` if:
+/// - this is an artifact of the rustc distribution process for "stable" or for "beta"
+/// - this is an `#[test]` that does not opt in with `enable_nightly_features`
+/// - this is an integration test that uses `ProcessBuilder`
+///   that does not opt in with `masquerade_as_nightly_cargo`
+///
+/// This should return `true` if:
+/// - this is an artifact of the rustc distribution process for "nightly"
+/// - this is being used in the rustc distribution process internally
+/// - this is a cargo executable that was built from source
+/// - this is an `#[test]` that called `enable_nightly_features`
+/// - this is an integration test that uses `ProcessBuilder`
+///   that called `masquerade_as_nightly_cargo`
+pub fn nightly_features_allowed() -> bool {
+    if ENABLE_NIGHTLY_FEATURES.with(|c| c.get()) {
+        return true;
+    }
+    match &channel()[..] {
+        "nightly" | "dev" => NIGHTLY_FEATURES_ALLOWED.with(|c| c.get()),
+        _ => false,
+    }
+}
+
+/// Allows nightly features to be enabled for this thread, but only if the
+/// development channel is nightly or dev.
+///
+/// Used by cargo main to ensure that a cargo build from source has nightly features.
+pub fn maybe_allow_nightly_features() {
+    NIGHTLY_FEATURES_ALLOWED.with(|c| c.set(true));
+}
+
+/// Forcibly enables nightly features for this thread.
+///
+/// Used by tests to allow the use of nightly features.
+pub fn enable_nightly_features() {
+    ENABLE_NIGHTLY_FEATURES.with(|c| c.set(true));
+}
diff --git a/src/cargo/core/interning.rs b/src/cargo/core/interning.rs
new file mode 100644
index 000000000..de9b07868
--- /dev/null
+++ b/src/cargo/core/interning.rs
@@ -0,0 +1,107 @@
+use serde::{Serialize, Serializer};
+
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::collections::HashSet;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ops::Deref;
+use std::ptr;
+use std::str;
+use std::sync::Mutex;
+
+pub fn leak(s: String) -> &'static str {
+    Box::leak(s.into_boxed_str())
+}
+
+lazy_static::lazy_static! {
+    static ref STRING_CACHE: Mutex<HashSet<&'static str>> = Mutex::new(HashSet::new());
+}
+
+#[derive(Clone, Copy)]
+pub struct InternedString {
+    inner: &'static str,
+}
+
+impl PartialEq for InternedString {
+    fn eq(&self, other: &InternedString) -> bool {
+        ptr::eq(self.as_str(), other.as_str())
+    }
+}
+
+impl Eq for InternedString {}
+
+impl InternedString {
+    pub fn new(str: &str) -> InternedString {
+        let mut cache = STRING_CACHE.lock().unwrap();
+        let s = cache.get(str).cloned().unwrap_or_else(|| {
+            let s = leak(str.to_string());
+            cache.insert(s);
+            s
+        });
+
+        InternedString { inner: s }
+    }
+
+    pub fn as_str(&self) -> &'static str {
+        self.inner
+    }
+}
+
+impl Deref for InternedString {
+    type Target = str;
+
+    fn deref(&self) -> &'static str {
+        self.as_str()
+    }
+}
+
+impl Hash for InternedString {
+    // N.B., we can't implement this as `identity(self).hash(state)`,
+    // because we use this for on-disk fingerprints and so need
+    // stability across Cargo invocations.
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.as_str().hash(state);
+    }
+}
+
+impl Borrow<str> for InternedString {
+    // If we implement Hash as `identity(self).hash(state)`,
+    // then this will need to be removed.
+    fn borrow(&self) -> &str {
+        self.as_str()
+    }
+}
+
+impl fmt::Debug for InternedString {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Debug::fmt(self.as_str(), f)
+    }
+}
+
+impl fmt::Display for InternedString {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(self.as_str(), f)
+    }
+}
+
+impl Ord for InternedString {
+    fn cmp(&self, other: &InternedString) -> Ordering {
+        self.as_str().cmp(other.as_str())
+    }
+}
+
+impl PartialOrd for InternedString {
+    fn partial_cmp(&self, other: &InternedString) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Serialize for InternedString {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(self.inner)
+    }
+}
diff --git a/src/cargo/core/manifest.rs b/src/cargo/core/manifest.rs
new file mode 100644
index 000000000..4fdbe65fa
--- /dev/null
+++ b/src/cargo/core/manifest.rs
@@ -0,0 +1,931 @@
+use std::collections::{BTreeMap, HashMap};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::path::{Path, PathBuf};
+use std::rc::Rc;
+
+use semver::Version;
+use serde::ser;
+use serde::Serialize;
+use url::Url;
+
+use crate::core::interning::InternedString;
+use crate::core::profiles::Profiles;
+use crate::core::{Dependency, PackageId, PackageIdSpec, SourceId, Summary};
+use crate::core::{Edition, Feature, Features, WorkspaceConfig};
+use crate::util::errors::*;
+use crate::util::toml::TomlManifest;
+use crate::util::{short_hash, Config, Filesystem};
+
+pub enum EitherManifest {
+    Real(Manifest),
+    Virtual(VirtualManifest),
+}
+
+/// Contains all the information about a package, as loaded from a `Cargo.toml`.
+#[derive(Clone, Debug)]
+pub struct Manifest {
+    summary: Summary,
+    targets: Vec<Target>,
+    links: Option<String>,
+    warnings: Warnings,
+    exclude: Vec<String>,
+    include: Vec<String>,
+    metadata: ManifestMetadata,
+    custom_metadata: Option<toml::Value>,
+    profiles: Profiles,
+    publish: Option<Vec<String>>,
+    publish_lockfile: bool,
+    replace: Vec<(PackageIdSpec, Dependency)>,
+    patch: HashMap<Url, Vec<Dependency>>,
+    workspace: WorkspaceConfig,
+    original: Rc<TomlManifest>,
+    features: Features,
+    edition: Edition,
+    im_a_teapot: Option<bool>,
+    default_run: Option<String>,
+    metabuild: Option<Vec<String>>,
+}
+
+/// When parsing `Cargo.toml`, some warnings should be silenced
+/// if the manifest comes from a dependency. `DelayedWarning`
+/// allows this delayed emission of warnings.
+#[derive(Clone, Debug)]
+pub struct DelayedWarning {
+    pub message: String,
+    pub is_critical: bool,
+}
+
+#[derive(Clone, Debug)]
+pub struct Warnings(Vec<DelayedWarning>);
+
+#[derive(Clone, Debug)]
+pub struct VirtualManifest {
+    replace: Vec<(PackageIdSpec, Dependency)>,
+    patch: HashMap<Url, Vec<Dependency>>,
+    workspace: WorkspaceConfig,
+    profiles: Profiles,
+    warnings: Warnings,
+    features: Features,
+}
+
+/// General metadata about a package which is just blindly uploaded to the
+/// registry.
+///
+/// Note that many of these fields can contain invalid values such as the
+/// homepage, repository, documentation, or license. These fields are not
+/// validated by cargo itself, but rather it is up to the registry when uploaded
+/// to validate these fields. Cargo will itself accept any valid TOML
+/// specification for these values.
+#[derive(PartialEq, Clone, Debug)]
+pub struct ManifestMetadata {
+    pub authors: Vec<String>,
+    pub keywords: Vec<String>,
+    pub categories: Vec<String>,
+    pub license: Option<String>,
+    pub license_file: Option<String>,
+    pub description: Option<String>,   // Not in Markdown
+    pub readme: Option<String>,        // File, not contents
+    pub homepage: Option<String>,      // URL
+    pub repository: Option<String>,    // URL
+    pub documentation: Option<String>, // URL
+    pub badges: BTreeMap<String, BTreeMap<String, String>>,
+    pub links: Option<String>,
+}
+
+#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub enum LibKind {
+    Lib,
+    Rlib,
+    Dylib,
+    ProcMacro,
+    Other(String),
+}
+
+impl LibKind {
+    /// Returns the argument suitable for `--crate-type` to pass to rustc.
+    pub fn crate_type(&self) -> &str {
+        match *self {
+            LibKind::Lib => "lib",
+            LibKind::Rlib => "rlib",
+            LibKind::Dylib => "dylib",
+            LibKind::ProcMacro => "proc-macro",
+            LibKind::Other(ref s) => s,
+        }
+    }
+
+    pub fn linkable(&self) -> bool {
+        match *self {
+            LibKind::Lib | LibKind::Rlib | LibKind::Dylib | LibKind::ProcMacro => true,
+            LibKind::Other(..) => false,
+        }
+    }
+}
+
+impl fmt::Debug for LibKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.crate_type().fmt(f)
+    }
+}
+
+impl<'a> From<&'a String> for LibKind {
+    fn from(string: &'a String) -> Self {
+        match string.as_ref() {
+            "lib" => LibKind::Lib,
+            "rlib" => LibKind::Rlib,
+            "dylib" => LibKind::Dylib,
+            "proc-macro" => LibKind::ProcMacro,
+            s => LibKind::Other(s.to_string()),
+        }
+    }
+}
+
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub enum TargetKind {
+    Lib(Vec<LibKind>),
+    Bin,
+    Test,
+    Bench,
+    ExampleLib(Vec<LibKind>),
+    ExampleBin,
+    CustomBuild,
+}
+
+impl ser::Serialize for TargetKind {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        use self::TargetKind::*;
+        match *self {
+            Lib(ref kinds) => s.collect_seq(kinds.iter().map(LibKind::crate_type)),
+            Bin => ["bin"].serialize(s),
+            ExampleBin | ExampleLib(_) => ["example"].serialize(s),
+            Test => ["test"].serialize(s),
+            CustomBuild => ["custom-build"].serialize(s),
+            Bench => ["bench"].serialize(s),
+        }
+    }
+}
+
+impl fmt::Debug for TargetKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        use self::TargetKind::*;
+        match *self {
+            Lib(ref kinds) => kinds.fmt(f),
+            Bin => "bin".fmt(f),
+            ExampleBin | ExampleLib(_) => "example".fmt(f),
+            Test => "test".fmt(f),
+            CustomBuild => "custom-build".fmt(f),
+            Bench => "bench".fmt(f),
+        }
+    }
+}
+
+impl TargetKind {
+    pub fn description(&self) -> &'static str {
+        match self {
+            TargetKind::Lib(..) => "lib",
+            TargetKind::Bin => "bin",
+            TargetKind::Test => "integration-test",
+            TargetKind::ExampleBin | TargetKind::ExampleLib(..) => "example",
+            TargetKind::Bench => "bench",
+            TargetKind::CustomBuild => "build-script",
+        }
+    }
+}
+
+/// Information about a binary, a library, an example, etc. that is part of the
+/// package.
+#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub struct Target {
+    kind: TargetKind,
+    name: String,
+    // Note that the `src_path` here is excluded from the `Hash` implementation
+    // as it's absolute currently and is otherwise a little too brittle for
+    // causing rebuilds. Instead the hash for the path that we send to the
+    // compiler is handled elsewhere.
+    src_path: TargetSourcePath,
+    required_features: Option<Vec<String>>,
+    tested: bool,
+    benched: bool,
+    doc: bool,
+    doctest: bool,
+    harness: bool, // whether to use the test harness (--test)
+    for_host: bool,
+    proc_macro: bool,
+    edition: Edition,
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum TargetSourcePath {
+    Path(PathBuf),
+    Metabuild,
+}
+
+impl TargetSourcePath {
+    pub fn path(&self) -> Option<&Path> {
+        match self {
+            TargetSourcePath::Path(path) => Some(path.as_ref()),
+            TargetSourcePath::Metabuild => None,
+        }
+    }
+
+    pub fn is_path(&self) -> bool {
+        match self {
+            TargetSourcePath::Path(_) => true,
+            _ => false,
+        }
+    }
+}
+
+impl Hash for TargetSourcePath {
+    fn hash<H: Hasher>(&self, _: &mut H) {
+        // ...
+    }
+}
+
+impl fmt::Debug for TargetSourcePath {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            TargetSourcePath::Path(path) => path.fmt(f),
+            TargetSourcePath::Metabuild => "metabuild".fmt(f),
+        }
+    }
+}
+
+impl From<PathBuf> for TargetSourcePath {
+    fn from(path: PathBuf) -> Self {
+        assert!(path.is_absolute(), "`{}` is not absolute", path.display());
+        TargetSourcePath::Path(path)
+    }
+}
+
+#[derive(Serialize)]
+struct SerializedTarget<'a> {
+    /// Is this a `--bin bin`, `--lib`, `--example ex`?
+    /// Serialized as a list of strings for historical reasons.
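+    /// e.g., `["lib"]` for a library target and `["example"]` for an example
+    /// (see the `TargetKind` serializer above).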
+    kind: &'a TargetKind,
+    /// Corresponds to `--crate-type` compiler attribute.
+    /// See https://doc.rust-lang.org/reference/linkage.html
+    crate_types: Vec<&'a str>,
+    name: &'a str,
+    src_path: Option<&'a PathBuf>,
+    edition: &'a str,
+    #[serde(rename = "required-features", skip_serializing_if = "Option::is_none")]
+    required_features: Option<Vec<&'a str>>,
+}
+
+impl ser::Serialize for Target {
+    fn serialize<S: ser::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        let src_path = match &self.src_path {
+            TargetSourcePath::Path(p) => Some(p),
+            // Unfortunately getting the correct path would require access to
+            // target_dir, which is not available here.
+            TargetSourcePath::Metabuild => None,
+        };
+        SerializedTarget {
+            kind: &self.kind,
+            crate_types: self.rustc_crate_types(),
+            name: &self.name,
+            src_path,
+            edition: &self.edition.to_string(),
+            required_features: self
+                .required_features
+                .as_ref()
+                .map(|rf| rf.iter().map(|s| &**s).collect()),
+        }
+        .serialize(s)
+    }
+}
+
+compact_debug! {
+    impl fmt::Debug for Target {
+        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            let (default, default_name) = {
+                match &self.kind {
+                    TargetKind::Lib(kinds) => {
+                        (
+                            Target::lib_target(
+                                &self.name,
+                                kinds.clone(),
+                                self.src_path().path().unwrap().to_path_buf(),
+                                self.edition,
+                            ),
+                            format!("lib_target({:?}, {:?}, {:?}, {:?})",
+                                    self.name, kinds, self.src_path, self.edition),
+                        )
+                    }
+                    TargetKind::CustomBuild => {
+                        match self.src_path {
+                            TargetSourcePath::Path(ref path) => {
+                                (
+                                    Target::custom_build_target(
+                                        &self.name,
+                                        path.to_path_buf(),
+                                        self.edition,
+                                    ),
+                                    format!("custom_build_target({:?}, {:?}, {:?})",
+                                            self.name, path, self.edition),
+                                )
+                            }
+                            TargetSourcePath::Metabuild => {
+                                (
+                                    Target::metabuild_target(&self.name),
+                                    format!("metabuild_target({:?})", self.name),
+                                )
+                            }
+                        }
+                    }
+                    _ => (
+                        Target::new(self.src_path.clone(), self.edition),
+                        format!("with_path({:?}, {:?})", self.src_path, self.edition),
+                    ),
+                }
+            };
+            [debug_the_fields(
+                kind
+                name
+                src_path
+                required_features
+                tested
+                benched
+                doc
+                doctest
+                harness
+                for_host
+                proc_macro
+                edition
+            )]
+        }
+    }
+}
+
+impl Manifest {
+    pub fn new(
+        summary: Summary,
+        targets: Vec<Target>,
+        exclude: Vec<String>,
+        include: Vec<String>,
+        links: Option<String>,
+        metadata: ManifestMetadata,
+        custom_metadata: Option<toml::Value>,
+        profiles: Profiles,
+        publish: Option<Vec<String>>,
+        publish_lockfile: bool,
+        replace: Vec<(PackageIdSpec, Dependency)>,
+        patch: HashMap<Url, Vec<Dependency>>,
+        workspace: WorkspaceConfig,
+        features: Features,
+        edition: Edition,
+        im_a_teapot: Option<bool>,
+        default_run: Option<String>,
+        original: Rc<TomlManifest>,
+        metabuild: Option<Vec<String>>,
+    ) -> Manifest {
+        Manifest {
+            summary,
+            targets,
+            warnings: Warnings::new(),
+            exclude,
+            include,
+            links,
+            metadata,
+            custom_metadata,
+            profiles,
+            publish,
+            replace,
+            patch,
+            workspace,
+            features,
+            edition,
+            original,
+            im_a_teapot,
+            default_run,
+            publish_lockfile,
+            metabuild,
+        }
+    }
+
+    pub fn dependencies(&self) -> &[Dependency] {
+        self.summary.dependencies()
+    }
+    pub fn exclude(&self) -> &[String] {
+        &self.exclude
+    }
+    pub fn include(&self) -> &[String] {
+        &self.include
+    }
+    pub fn metadata(&self) -> &ManifestMetadata {
+        &self.metadata
+    }
+    pub fn name(&self) -> InternedString {
+        self.package_id().name()
+    }
+    pub fn package_id(&self) -> PackageId {
+        self.summary.package_id()
+    }
+    pub fn summary(&self) -> &Summary {
+        &self.summary
+    }
+    pub fn targets(&self) -> &[Target] {
+        &self.targets
+    }
+    pub fn version(&self) -> &Version {
+        self.package_id().version()
+    }
+    pub fn warnings_mut(&mut self) -> &mut Warnings {
+        &mut self.warnings
+    }
+    pub fn warnings(&self) -> &Warnings {
+        &self.warnings
+    }
+    pub fn profiles(&self) -> &Profiles {
+        &self.profiles
+    }
+    pub fn publish(&self) -> &Option<Vec<String>> {
+        &self.publish
+    }
+    pub fn publish_lockfile(&self) -> bool {
+        self.publish_lockfile
+    }
+    pub fn replace(&self) -> &[(PackageIdSpec, Dependency)] {
+        &self.replace
+    }
+    pub fn original(&self) -> &TomlManifest {
+        &self.original
+    }
+    pub fn patch(&self) -> &HashMap<Url, Vec<Dependency>> {
+        &self.patch
+    }
+    pub fn links(&self) -> Option<&str> {
+        self.links.as_ref().map(|s| &s[..])
+    }
+
+    pub fn workspace_config(&self) -> &WorkspaceConfig {
+        &self.workspace
+    }
+
+    pub fn features(&self) -> &Features {
+        &self.features
+    }
+
+    pub fn set_summary(&mut self, summary: Summary) {
+        self.summary = summary;
+    }
+
+    pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Manifest {
+        Manifest {
+            summary: self.summary.map_source(to_replace, replace_with),
+            ..self
+        }
+    }
+
+    pub fn feature_gate(&self) -> CargoResult<()> {
+        if self.im_a_teapot.is_some() {
+            self.features
+                .require(Feature::test_dummy_unstable())
+                .chain_err(|| {
+                    failure::format_err!(
+                        "the `im-a-teapot` manifest key is unstable and may \
+                         not work properly in England"
+                    )
+                })?;
+        }
+
+        if self.default_run.is_some() {
+            self.features
+                .require(Feature::default_run())
+                .chain_err(|| failure::format_err!("the `default-run` manifest key is unstable"))?;
+        }
+
+        Ok(())
+    }
+
+    // Just a helper function to test out `-Z` flags on Cargo
+    pub fn print_teapot(&self, config: &Config) {
+        if let Some(teapot) = self.im_a_teapot {
+            if config.cli_unstable().print_im_a_teapot {
+                println!("im-a-teapot = {}", teapot);
+            }
+        }
+    }
+
+    pub fn edition(&self) -> Edition {
+        self.edition
+    }
+
+    pub fn custom_metadata(&self) -> Option<&toml::Value> {
+        self.custom_metadata.as_ref()
+    }
+
+    pub fn default_run(&self) -> Option<&str> {
+        self.default_run.as_ref().map(|s| &s[..])
+    }
+
+    pub fn metabuild(&self) -> Option<&Vec<String>> {
+        self.metabuild.as_ref()
+    }
+
+    pub fn metabuild_path(&self, target_dir: Filesystem) -> PathBuf {
+        let hash = short_hash(&self.package_id());
+        target_dir
+            .into_path_unlocked()
+            .join(".metabuild")
+            .join(format!("metabuild-{}-{}.rs", self.name(), hash))
+    }
+}
+
+impl VirtualManifest {
+    pub fn new(
+        replace: Vec<(PackageIdSpec, Dependency)>,
+        patch: HashMap<Url, Vec<Dependency>>,
+        workspace: WorkspaceConfig,
+        profiles: Profiles,
+        features: Features,
+    ) -> VirtualManifest {
+        VirtualManifest {
+            replace,
+            patch,
+            workspace,
+            profiles,
+            warnings: Warnings::new(),
+            features,
+        }
+    }
+
+    pub fn replace(&self) -> &[(PackageIdSpec, Dependency)] {
+        &self.replace
+    }
+
+    pub fn patch(&self) -> &HashMap<Url, Vec<Dependency>> {
+        &self.patch
+    }
+
+    pub fn workspace_config(&self) -> &WorkspaceConfig {
+        &self.workspace
+    }
+
+    pub fn profiles(&self) -> &Profiles {
+        &self.profiles
+    }
+
+    pub fn warnings_mut(&mut self) -> &mut Warnings {
+        &mut self.warnings
+    }
+
+    pub fn warnings(&self) -> &Warnings {
+        &self.warnings
+    }
+
+    pub fn features(&self) -> &Features {
+        &self.features
+    }
+}
+
+impl Target {
+    fn new(src_path: TargetSourcePath, edition: Edition) -> Target {
+        Target {
+            kind: TargetKind::Bin,
+            name: String::new(),
+            src_path,
+            required_features: None,
+            doc: false,
+            doctest: false,
+            harness: true,
+            for_host: false,
+            proc_macro: false,
+            edition,
+            tested: true,
+            benched: true,
+        }
+    }
+
+    fn with_path(src_path: PathBuf, edition: Edition) -> Target {
+        Target::new(TargetSourcePath::from(src_path), edition)
+    }
+
+    pub fn lib_target(
+        name: &str,
+        crate_targets: Vec<LibKind>,
+        src_path: PathBuf,
+        edition: Edition,
+    ) -> Target {
+        Target {
+            kind: TargetKind::Lib(crate_targets),
+            name: name.to_string(),
+            doctest: true,
+            doc: true,
+            ..Target::with_path(src_path, edition)
+        }
+    }
+
+    pub fn bin_target(
+        name: &str,
+        src_path: PathBuf,
+        required_features: Option<Vec<String>>,
+        edition: Edition,
+    ) -> Target {
+        Target {
+            kind: TargetKind::Bin,
+            name: name.to_string(),
+            required_features,
+            doc: true,
+            ..Target::with_path(src_path, edition)
+        }
+    }
+
+    /// Builds a `Target` corresponding to the `build = "build.rs"` entry.
+    pub fn custom_build_target(name: &str, src_path: PathBuf, edition: Edition) -> Target {
+        Target {
+            kind: TargetKind::CustomBuild,
+            name: name.to_string(),
+            for_host: true,
+            benched: false,
+            tested: false,
+            ..Target::with_path(src_path, edition)
+        }
+    }
+
+    pub fn metabuild_target(name: &str) -> Target {
+        Target {
+            kind: TargetKind::CustomBuild,
+            name: name.to_string(),
+            for_host: true,
+            benched: false,
+            tested: false,
+            ..Target::new(TargetSourcePath::Metabuild, Edition::Edition2018)
+        }
+    }
+
+    pub fn example_target(
+        name: &str,
+        crate_targets: Vec<LibKind>,
+        src_path: PathBuf,
+        required_features: Option<Vec<String>>,
+        edition: Edition,
+    ) -> Target {
+        let kind = if crate_targets.is_empty()
+            || crate_targets
+                .iter()
+                .all(|t| *t == LibKind::Other("bin".into()))
+        {
+            TargetKind::ExampleBin
+        } else {
+            TargetKind::ExampleLib(crate_targets)
+        };
+
+        Target {
+            kind,
+            name: name.to_string(),
+            required_features,
+            tested: false,
+            benched: false,
+            ..Target::with_path(src_path, edition)
+        }
+    }
+
+    pub fn test_target(
+        name: &str,
+        src_path: PathBuf,
+        required_features: Option<Vec<String>>,
+        edition: Edition,
+    ) -> Target {
+        Target {
+            kind: TargetKind::Test,
+            name: name.to_string(),
+            required_features,
+            benched: false,
+            ..Target::with_path(src_path, edition)
+        }
+    }
+
+    pub fn bench_target(
+        name: &str,
+        src_path: PathBuf,
+        required_features: Option<Vec<String>>,
+        edition: Edition,
+    ) -> Target {
+        Target {
+            kind: TargetKind::Bench,
+            name: name.to_string(),
+            required_features,
+            tested: false,
+            ..Target::with_path(src_path, edition)
+        }
+    }
+
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+    pub fn crate_name(&self) -> String {
+        self.name.replace("-", "_")
+    }
+    pub fn src_path(&self) -> &TargetSourcePath {
+        &self.src_path
+    }
+    pub fn set_src_path(&mut self, src_path: TargetSourcePath) {
+        self.src_path = src_path;
+    }
+    pub fn required_features(&self) -> Option<&Vec<String>> {
+        self.required_features.as_ref()
+    }
+    pub fn kind(&self) -> &TargetKind {
+        &self.kind
+    }
+    pub fn tested(&self) -> bool {
+        self.tested
+    }
+    pub fn harness(&self) -> bool {
+        self.harness
+    }
+    pub fn documented(&self) -> bool {
+        self.doc
+    }
+    pub fn for_host(&self) -> bool {
+        self.for_host
+    }
+    pub fn proc_macro(&self) -> bool {
+        self.proc_macro
+    }
+    pub fn edition(&self) -> Edition {
+        self.edition
+    }
+    pub fn benched(&self) -> bool {
+        self.benched
+    }
+    pub fn doctested(&self) -> bool {
+        self.doctest
+    }
+
+    pub fn doctestable(&self) -> bool {
+        match self.kind {
+            TargetKind::Lib(ref kinds) => kinds
+                .iter()
+                .any(|k| *k == LibKind::Rlib || *k == LibKind::Lib || *k == LibKind::ProcMacro),
+            _ => false,
+        }
+    }
+
+    pub fn allows_underscores(&self) -> bool {
+        self.is_bin() || self.is_example() || self.is_custom_build()
+    }
+
+    pub fn is_lib(&self) -> bool {
+        match self.kind {
+            TargetKind::Lib(_) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_dylib(&self) -> bool {
+        match self.kind {
+            TargetKind::Lib(ref libs) => libs.iter().any(|l| *l == LibKind::Dylib),
+            _ => false,
+        }
+    }
+
+    pub fn is_cdylib(&self) -> bool {
+        let libs = match self.kind {
+            TargetKind::Lib(ref libs) => libs,
+            _ => return false,
+        };
+        libs.iter().any(|l| match *l {
+            LibKind::Other(ref s) => s == "cdylib",
+            _ => false,
+        })
+    }
+
+    pub fn linkable(&self) -> bool {
+        match self.kind {
+            TargetKind::Lib(ref kinds) => kinds.iter().any(|k| k.linkable()),
+            _ => false,
+        }
+    }
+
+    pub fn is_bin(&self) -> bool {
+        self.kind == TargetKind::Bin
+    }
+
+    pub fn is_example(&self) -> bool {
+        match self.kind {
+            TargetKind::ExampleBin | TargetKind::ExampleLib(..) => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_bin_example(&self) -> bool {
+        // Needed for --all-examples in contexts where only runnable examples make sense
+        match self.kind {
+            TargetKind::ExampleBin => true,
+            _ => false,
+        }
+    }
+
+    pub fn is_test(&self) -> bool {
+        self.kind == TargetKind::Test
+    }
+    pub fn is_bench(&self) -> bool {
+        self.kind == TargetKind::Bench
+    }
+    pub fn is_custom_build(&self) -> bool {
+        self.kind == TargetKind::CustomBuild
+    }
+
+    /// Returns the arguments suitable for `--crate-type` to pass to rustc.
+    pub fn rustc_crate_types(&self) -> Vec<&str> {
+        match self.kind {
+            TargetKind::Lib(ref kinds) | TargetKind::ExampleLib(ref kinds) => {
+                kinds.iter().map(LibKind::crate_type).collect()
+            }
+            TargetKind::CustomBuild
+            | TargetKind::Bench
+            | TargetKind::Test
+            | TargetKind::ExampleBin
+            | TargetKind::Bin => vec!["bin"],
+        }
+    }
+
+    pub fn can_lto(&self) -> bool {
+        match self.kind {
+            TargetKind::Lib(ref v) => {
+                !v.contains(&LibKind::Rlib)
+                    && !v.contains(&LibKind::Dylib)
+                    && !v.contains(&LibKind::Lib)
+            }
+            _ => true,
+        }
+    }
+
+    pub fn set_tested(&mut self, tested: bool) -> &mut Target {
+        self.tested = tested;
+        self
+    }
+    pub fn set_benched(&mut self, benched: bool) -> &mut Target {
+        self.benched = benched;
+        self
+    }
+    pub fn set_doctest(&mut self, doctest: bool) -> &mut Target {
+        self.doctest = doctest;
+        self
+    }
+    pub fn set_for_host(&mut self, for_host: bool) -> &mut Target {
+        self.for_host = for_host;
+        self
+    }
+    pub fn set_proc_macro(&mut self, proc_macro: bool) -> &mut Target {
+        self.proc_macro = proc_macro;
+        self
+    }
+    pub fn set_edition(&mut self, edition: Edition) -> &mut Target {
+        self.edition = edition;
+        self
+    }
+    pub fn set_harness(&mut self, harness: bool) -> &mut Target {
+        self.harness = harness;
+        self
+    }
+    pub fn set_doc(&mut self, doc: bool) -> &mut Target {
+        self.doc = doc;
+        self
+    }
+}
+
+impl fmt::Display for Target {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.kind {
+            TargetKind::Lib(..) => write!(f, "Target(lib)"),
+            TargetKind::Bin => write!(f, "Target(bin: {})", self.name),
+            TargetKind::Test => write!(f, "Target(test: {})", self.name),
+            TargetKind::Bench => write!(f, "Target(bench: {})", self.name),
+            TargetKind::ExampleBin | TargetKind::ExampleLib(..) => {
+                write!(f, "Target(example: {})", self.name)
+            }
+            TargetKind::CustomBuild => write!(f, "Target(script)"),
+        }
+    }
+}
+
+impl Warnings {
+    fn new() -> Warnings {
+        Warnings(Vec::new())
+    }
+
+    pub fn add_warning(&mut self, s: String) {
+        self.0.push(DelayedWarning {
+            message: s,
+            is_critical: false,
+        })
+    }
+
+    pub fn add_critical_warning(&mut self, s: String) {
+        self.0.push(DelayedWarning {
+            message: s,
+            is_critical: true,
+        })
+    }
+
+    pub fn warnings(&self) -> &[DelayedWarning] {
+        &self.0
+    }
+}
diff --git a/src/cargo/core/mod.rs b/src/cargo/core/mod.rs
new file mode 100644
index 000000000..f94291f74
--- /dev/null
+++ b/src/cargo/core/mod.rs
@@ -0,0 +1,32 @@
+pub use self::dependency::Dependency;
+pub use self::features::{
+    enable_nightly_features, maybe_allow_nightly_features, nightly_features_allowed,
+};
+pub use self::features::{CliUnstable, Edition, Feature, Features};
+pub use self::manifest::{EitherManifest, VirtualManifest};
+pub use self::manifest::{LibKind, Manifest, Target, TargetKind};
+pub use self::package::{Package, PackageSet};
+pub use self::package_id::PackageId;
+pub use self::package_id_spec::PackageIdSpec;
+pub use self::registry::Registry;
+pub use self::resolver::Resolve;
+pub use self::shell::{Shell, Verbosity};
+pub use self::source::{GitReference, Source, SourceId, SourceMap};
+pub use self::summary::{FeatureMap, FeatureValue, Summary};
+pub use self::workspace::{Members, Workspace, WorkspaceConfig, WorkspaceRootConfig};
+
+pub mod compiler;
+pub mod dependency;
+mod features;
+mod interning;
+pub mod manifest;
+pub mod package;
+pub mod package_id;
+mod package_id_spec;
+pub mod profiles;
+pub mod registry;
+pub mod resolver;
+pub mod shell;
+pub mod source;
+pub mod summary;
+mod workspace;
diff --git a/src/cargo/core/package.rs b/src/cargo/core/package.rs
new file mode 100644
index 000000000..a4850b11f
--- /dev/null
+++ b/src/cargo/core/package.rs
@@ -0,0 +1,937 @@
+use std::cell::{Cell, Ref, RefCell};
+use std::cmp::Ordering;
+use std::collections::{HashMap, HashSet};
+use std::fmt;
+use std::hash;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::time::{Duration, Instant};
+
+use bytesize::ByteSize;
+use curl::easy::{Easy, HttpVersion};
+use curl::multi::{EasyHandle, Multi};
+use failure::ResultExt;
+use lazycell::LazyCell;
+use log::{debug, warn};
+use semver::Version;
+use serde::ser;
+use serde::Serialize;
+
+use crate::core::interning::InternedString;
+use crate::core::source::MaybePackage;
+use crate::core::{Dependency, Manifest, PackageId, SourceId, Target};
+use crate::core::{FeatureMap, SourceMap, Summary};
+use crate::ops;
+use crate::util::errors::{CargoResult, CargoResultExt, HttpNot200};
+use crate::util::network::Retry;
+use crate::util::{self, internal, lev_distance, Config, Progress, ProgressStyle};
+
+/// Information about a package that is available somewhere in the file system.
+///
+/// A package is a `Cargo.toml` file plus all the files that are part of it.
+//
+// TODO: is `manifest_path` a relic?
+#[derive(Clone)]
+pub struct Package {
+    /// The package's manifest.
+    manifest: Manifest,
+    /// The root of the package.
+    manifest_path: PathBuf,
+}
+
+impl Ord for Package {
+    fn cmp(&self, other: &Package) -> Ordering {
+        self.package_id().cmp(&other.package_id())
+    }
+}
+
+impl PartialOrd for Package {
+    fn partial_cmp(&self, other: &Package) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+/// A `Package` in a form where `Serialize` can be derived.
+#[derive(Serialize)]
+struct SerializedPackage<'a> {
+    name: &'a str,
+    version: &'a Version,
+    id: PackageId,
+    license: Option<&'a str>,
+    license_file: Option<&'a str>,
+    description: Option<&'a str>,
+    source: SourceId,
+    dependencies: &'a [Dependency],
+    targets: Vec<&'a Target>,
+    features: &'a FeatureMap,
+    manifest_path: &'a Path,
+    metadata: Option<&'a toml::Value>,
+    authors: &'a [String],
+    categories: &'a [String],
+    keywords: &'a [String],
+    readme: Option<&'a str>,
+    repository: Option<&'a str>,
+    edition: &'a str,
+    links: Option<&'a str>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    metabuild: Option<&'a Vec<String>>,
+}
+
+impl ser::Serialize for Package {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        let summary = self.manifest.summary();
+        let package_id = summary.package_id();
+        let manmeta = self.manifest.metadata();
+        let license = manmeta.license.as_ref().map(String::as_ref);
+        let license_file = manmeta.license_file.as_ref().map(String::as_ref);
+        let description = manmeta.description.as_ref().map(String::as_ref);
+        let authors = manmeta.authors.as_ref();
+        let categories = manmeta.categories.as_ref();
+        let keywords = manmeta.keywords.as_ref();
+        let readme = manmeta.readme.as_ref().map(String::as_ref);
+        let repository = manmeta.repository.as_ref().map(String::as_ref);
+        // Filter out metabuild targets. They are an internal implementation
+        // detail that is probably not relevant externally. There's also not a
+        // real path to show in `src_path`, and this avoids changing the format.
+        let targets: Vec<&Target> = self
+            .manifest
+            .targets()
+            .iter()
+            .filter(|t| t.src_path().is_path())
+            .collect();
+
+        SerializedPackage {
+            name: &*package_id.name(),
+            version: &package_id.version(),
+            id: package_id,
+            license,
+            license_file,
+            description,
+            source: summary.source_id(),
+            dependencies: summary.dependencies(),
+            targets,
+            features: summary.features(),
+            manifest_path: &self.manifest_path,
+            metadata: self.manifest.custom_metadata(),
+            authors,
+            categories,
+            keywords,
+            readme,
+            repository,
+            edition: &self.manifest.edition().to_string(),
+            links: self.manifest.links(),
+            metabuild: self.manifest.metabuild(),
+        }
+        .serialize(s)
+    }
+}
+
+impl Package {
+    /// Creates a package from a manifest and its location.
+    pub fn new(manifest: Manifest, manifest_path: &Path) -> Package {
+        Package {
+            manifest,
+            manifest_path: manifest_path.to_path_buf(),
+        }
+    }
+
+    /// Gets the manifest dependencies.
+    pub fn dependencies(&self) -> &[Dependency] {
+        self.manifest.dependencies()
+    }
+    /// Gets the manifest.
+    pub fn manifest(&self) -> &Manifest {
+        &self.manifest
+    }
+    /// Gets the path to the manifest.
+    pub fn manifest_path(&self) -> &Path {
+        &self.manifest_path
+    }
+    /// Gets the name of the package.
+    pub fn name(&self) -> InternedString {
+        self.package_id().name()
+    }
+    /// Gets the `PackageId` object for the package (fully defines a package).
+    pub fn package_id(&self) -> PackageId {
+        self.manifest.package_id()
+    }
+    /// Gets the root folder of the package.
+    pub fn root(&self) -> &Path {
+        self.manifest_path.parent().unwrap()
+    }
+    /// Gets the summary for the package.
+    pub fn summary(&self) -> &Summary {
+        self.manifest.summary()
+    }
+    /// Gets the targets specified in the manifest.
+    pub fn targets(&self) -> &[Target] {
+        self.manifest.targets()
+    }
+    /// Gets the current package version.
+    pub fn version(&self) -> &Version {
+        self.package_id().version()
+    }
+    /// Gets the package authors.
+    pub fn authors(&self) -> &Vec<String> {
+        &self.manifest.metadata().authors
+    }
+    /// Gets the registries the package may be published to (`None` means any).
+    pub fn publish(&self) -> &Option<Vec<String>> {
+        self.manifest.publish()
+    }
+
+    /// Returns `true` if the package uses a custom build script for any target.
+    pub fn has_custom_build(&self) -> bool {
+        self.targets().iter().any(|t| t.is_custom_build())
+    }
+
+    pub fn find_closest_target(
+        &self,
+        target: &str,
+        is_expected_kind: fn(&Target) -> bool,
+    ) -> Option<&Target> {
+        let targets = self.targets();
+
+        let matches = targets
+            .iter()
+            .filter(|t| is_expected_kind(t))
+            .map(|t| (lev_distance(target, t.name()), t))
+            .filter(|&(d, _)| d < 4);
+        matches.min_by_key(|t| t.0).map(|t| t.1)
+    }
+
+    pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Package {
+        Package {
+            manifest: self.manifest.map_source(to_replace, replace_with),
+            manifest_path: self.manifest_path,
+        }
+    }
+
+    pub fn to_registry_toml(&self, config: &Config) -> CargoResult<String> {
+        let manifest = self.manifest().original().prepare_for_publish(config)?;
+        let toml = toml::to_string(&manifest)?;
+        Ok(format!(
+            "\
+             # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO\n\
+             #\n\
+             # When uploading crates to the registry Cargo will automatically\n\
+             # \"normalize\" Cargo.toml files for maximal compatibility\n\
+             # with all versions of Cargo and also rewrite `path` dependencies\n\
+             # to registry (e.g., crates.io) dependencies\n\
+             #\n\
+             # If you believe there's an error in this file please file an\n\
+             # issue against the rust-lang/cargo repository. If you're\n\
+             # editing this file be aware that the upstream Cargo.toml\n\
+             # will likely look very different (and much more reasonable)\n\
+             \n\
+             {}\
+            ",
+            toml
+        ))
+    }
+}
+
+impl fmt::Display for Package {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.summary().package_id())
+    }
+}
+
+impl fmt::Debug for Package {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Package")
+            .field("id", &self.summary().package_id())
+            .field("..", &"..")
+            .finish()
+    }
+}
+
+impl PartialEq for Package {
+    fn eq(&self, other: &Package) -> bool {
+        self.package_id() == other.package_id()
+    }
+}
+
+impl Eq for Package {}
+
+impl hash::Hash for Package {
+    fn hash<H: hash::Hasher>(&self, into: &mut H) {
+        self.package_id().hash(into)
+    }
+}
+
+pub struct PackageSet<'cfg> {
+    packages: HashMap<PackageId, LazyCell<Package>>,
+    sources: RefCell<SourceMap<'cfg>>,
+    config: &'cfg Config,
+    multi: Multi,
+    downloading: Cell<bool>,
+    multiplexing: bool,
+}
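`PackageSet` materializes packages lazily through the `Downloads` front end defined next; `get_many` (further below) drives it with a start/wait loop. A standalone toy model of that protocol, not part of the imported source: `start` either returns a ready item or queues it, and `wait` blocks for the next queued one.

```rust
// Toy model of the `start()`/`wait()` protocol used by `Downloads`.
struct Queue {
    pending: Vec<u32>,
}

impl Queue {
    fn start(&mut self, id: u32) -> Option<u32> {
        if id % 2 == 0 {
            Some(id) // already cached, nothing to download
        } else {
            self.pending.push(id);
            None
        }
    }
    fn remaining(&self) -> usize {
        self.pending.len()
    }
    fn wait(&mut self) -> u32 {
        self.pending.pop().expect("no remaining downloads")
    }
}

fn main() {
    let mut q = Queue { pending: Vec::new() };
    let mut done = Vec::new();
    // Mirrors `PackageSet::get_many`: enqueue everything, then drain.
    for id in vec![1, 2, 3, 4] {
        if let Some(pkg) = q.start(id) {
            done.push(pkg);
        }
    }
    while q.remaining() > 0 {
        done.push(q.wait());
    }
    assert_eq!(done.len(), 4);
}
```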
+pub struct Downloads<'a, 'cfg: 'a> {
+    set: &'a PackageSet<'cfg>,
+    pending: HashMap<usize, (Download<'cfg>, EasyHandle)>,
+    pending_ids: HashSet<PackageId>,
+    results: Vec<(usize, Result<(), curl::Error>)>,
+    next: usize,
+    progress: RefCell<Option<Progress<'cfg>>>,
+    downloads_finished: usize,
+    downloaded_bytes: u64,
+    largest: (u64, String),
+    start: Instant,
+    success: bool,
+
+    /// Timeout management: both the timeout thresholds and whether or not our
+    /// connection has timed out (with an accompanying message if it has).
+    ///
+    /// Note that timeout management is done manually here instead of in libcurl
+    /// because we want to apply timeouts to an entire batch of operations, not
+    /// any one particular single operation.
+    timeout: ops::HttpTimeout,                   // timeout configuration
+    updated_at: Cell<Instant>,                   // last time we received bytes
+    next_speed_check: Cell<Instant>,             // if threshold isn't 0 by this time, error
+    next_speed_check_bytes_threshold: Cell<u64>, // decremented when we receive bytes
+}
+
+struct Download<'cfg> {
+    /// The token for this download, used as the key of the `Downloads::pending` map
+    /// and stored in `EasyHandle` as well.
+    token: usize,
+
+    /// The package that we're downloading.
+    id: PackageId,
+
+    /// Actual downloaded data, updated throughout the lifetime of this download.
+    data: RefCell<Vec<u8>>,
+
+    /// The URL that we're downloading from, cached here for error messages and
+    /// reenqueuing.
+    url: String,
+
+    /// A descriptive string to print when we've finished downloading this crate.
+    descriptor: String,
+
+    /// Statistics updated from the progress callback in libcurl.
+    total: Cell<u64>,
+    current: Cell<u64>,
+
+    /// The moment we started this transfer at.
+    start: Instant,
+    timed_out: Cell<Option<String>>,
+
+    /// Logic used to track retrying this download if it's a spurious failure.
+    retry: Retry<'cfg>,
+}
+
+impl<'cfg> PackageSet<'cfg> {
+    pub fn new(
+        package_ids: &[PackageId],
+        sources: SourceMap<'cfg>,
+        config: &'cfg Config,
+    ) -> CargoResult<PackageSet<'cfg>> {
+        // We've enabled the `http2` feature of `curl` in Cargo, so treat
+        // failures here as fatal as it would indicate a build-time problem.
+        //
+        // Note that the multiplexing support is pretty new, so it's on by
+        // default but can still be disabled via the `http.multiplexing`
+        // config value.
+        //
+        // Also note that pipelining is disabled as curl authors have indicated
+        // that it's buggy, and we've empirically seen that it's buggy with HTTP
+        // proxies.
+        let mut multi = Multi::new();
+        let multiplexing = config
+            .get::<Option<bool>>("http.multiplexing")?
+            .unwrap_or(true);
+        multi
+            .pipelining(false, multiplexing)
+            .chain_err(|| "failed to enable multiplexing/pipelining in curl")?;
+
+        // let's not flood crates.io with connections
+        multi.set_max_host_connections(2)?;
+
+        Ok(PackageSet {
+            packages: package_ids
+                .iter()
+                .map(|&id| (id, LazyCell::new()))
+                .collect(),
+            sources: RefCell::new(sources),
+            config,
+            multi,
+            downloading: Cell::new(false),
+            multiplexing,
+        })
+    }
+
+    pub fn package_ids<'a>(&'a self) -> impl Iterator<Item = PackageId> + 'a {
+        self.packages.keys().cloned()
+    }
+
+    pub fn enable_download<'a>(&'a self) -> CargoResult<Downloads<'a, 'cfg>> {
+        assert!(!self.downloading.replace(true));
+        let timeout = ops::HttpTimeout::new(self.config)?;
+        Ok(Downloads {
+            start: Instant::now(),
+            set: self,
+            next: 0,
+            pending: HashMap::new(),
+            pending_ids: HashSet::new(),
+            results: Vec::new(),
+            progress: RefCell::new(Some(Progress::with_style(
+                "Downloading",
+                ProgressStyle::Ratio,
+                self.config,
+            ))),
+            downloads_finished: 0,
+            downloaded_bytes: 0,
+            largest: (0, String::new()),
+            success: false,
+            updated_at: Cell::new(Instant::now()),
+            timeout,
+            next_speed_check: Cell::new(Instant::now()),
+            next_speed_check_bytes_threshold: Cell::new(0),
+        })
+    }
+
+    pub fn get_one(&self, id: PackageId) -> CargoResult<&Package> {
+        Ok(self.get_many(Some(id))?.remove(0))
+    }
+
+    pub fn get_many(&self, ids: impl IntoIterator<Item = PackageId>) -> CargoResult<Vec<&Package>> {
+        let mut pkgs = Vec::new();
+        let mut downloads = self.enable_download()?;
+        for id in ids {
+            pkgs.extend(downloads.start(id)?);
+        }
+        while downloads.remaining() > 0 {
+            pkgs.push(downloads.wait()?);
+        }
+        downloads.success = true;
+        Ok(pkgs)
+    }
+
+    pub fn sources(&self) -> Ref<'_, SourceMap<'cfg>> {
+        self.sources.borrow()
+    }
+}
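`new` above reads `http.multiplexing` as an `Option<bool>` and defaults to `true` when unset. A small standalone sketch of that default-if-unset shape, with a hypothetical `lookup` standing in for Cargo's `Config::get`:

```rust
// Hypothetical stand-in for `config.get::<Option<bool>>("http.multiplexing")`.
fn lookup(key: &str) -> Option<bool> {
    match key {
        "http.multiplexing" => None, // unset in this example
        _ => None,
    }
}

fn main() {
    // Mirrors `unwrap_or(true)`: multiplexing stays on unless explicitly disabled.
    let multiplexing = lookup("http.multiplexing").unwrap_or(true);
    assert!(multiplexing);
}
```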
+// When dynamically linked against libcurl, we want to ignore some failures
+// when using old versions that don't support certain features.
+macro_rules! try_old_curl {
+    ($e:expr, $msg:expr) => {
+        let result = $e;
+        if cfg!(target_os = "macos") {
+            if let Err(e) = result {
+                warn!("ignoring libcurl {} error: {}", $msg, e);
+            }
+        } else {
+            result.with_context(|_| {
+                failure::format_err!("failed to enable {}, is curl not built right?", $msg)
+            })?;
+        }
+    };
+}
+
+impl<'a, 'cfg> Downloads<'a, 'cfg> {
+    /// Starts to download the package for the `id` specified.
+    ///
+    /// Returns `None` if the package is queued up for download and will
+    /// eventually be returned from `wait_for_download`. Returns `Some(pkg)` if
+    /// the package is ready and doesn't need to be downloaded.
+    pub fn start(&mut self, id: PackageId) -> CargoResult<Option<&'a Package>> {
+        // First up see if we've already cached this package, in which case
+        // there's nothing to do.
+        let slot = self
+            .set
+            .packages
+            .get(&id)
+            .ok_or_else(|| internal(format!("couldn't find `{}` in package set", id)))?;
+        if let Some(pkg) = slot.borrow() {
+            return Ok(Some(pkg));
+        }
+
+        // Ask the original source of this `PackageId` for the corresponding
+        // package. That may immediately come back and tell us that the package
+        // is ready, or it could tell us that it needs to be downloaded.
+        let mut sources = self.set.sources.borrow_mut();
+        let source = sources
+            .get_mut(id.source_id())
+            .ok_or_else(|| internal(format!("couldn't find source for `{}`", id)))?;
+        let pkg = source
+            .download(id)
+            .chain_err(|| failure::format_err!("unable to get packages from source"))?;
+        let (url, descriptor) = match pkg {
+            MaybePackage::Ready(pkg) => {
+                debug!("{} doesn't need a download", id);
+                assert!(slot.fill(pkg).is_ok());
+                return Ok(Some(slot.borrow().unwrap()));
+            }
+            MaybePackage::Download { url, descriptor } => (url, descriptor),
+        };
+
+        // Ok we're going to download this crate, so let's set up all our
+        // internal state and hand off an `Easy` handle to our libcurl `Multi`
+        // handle. This won't actually start the transfer, but later it'll
+        // happen during `wait_for_download`.
+        let token = self.next;
+        self.next += 1;
+        debug!("downloading {} as {}", id, token);
+        assert!(self.pending_ids.insert(id));
+
+        let (mut handle, _timeout) = ops::http_handle_and_timeout(self.set.config)?;
+        handle.get(true)?;
+        handle.url(&url)?;
+        handle.follow_location(true)?; // follow redirects
+
+        // Enable HTTP/2 to be used as it'll allow true multiplexing which makes
+        // downloads much faster.
+        //
+        // Currently Cargo requests the `http2` feature of the `curl` crate
+        // which means it should always be built in. On OSX, however, we ship
+        // cargo still linked against the system libcurl. Building curl with
+        // ALPN support for HTTP/2 requires newer versions of OSX (the
+        // SecureTransport API) than we want to ship Cargo for. By linking Cargo
+        // against the system libcurl then older curl installations won't use
+        // HTTP/2 but newer ones will. All that to basically say we ignore
+        // errors here on OSX, but consider this a fatal error to not activate
+        // HTTP/2 on all other platforms.
+        if self.set.multiplexing {
+            try_old_curl!(handle.http_version(HttpVersion::V2), "HTTP2");
+        } else {
+            handle.http_version(HttpVersion::V11)?;
+        }
+
+        // This is an option to `libcurl` which indicates that if there's a
+        // bunch of parallel requests to the same host they all wait until the
+        // pipelining status of the host is known. This means that we won't
+        // initiate dozens of connections to crates.io, but rather only one.
+        // Once the main one is opened we realize that pipelining is possible
+        // and multiplexing is possible with static.crates.io. All in all this
+        // reduces the number of connections done to a more manageable state.
+        try_old_curl!(handle.pipewait(true), "pipewait");
+
+        handle.write_function(move |buf| {
+            debug!("{} - {} bytes of data", token, buf.len());
+            tls::with(|downloads| {
+                if let Some(downloads) = downloads {
+                    downloads.pending[&token]
+                        .0
+                        .data
+                        .borrow_mut()
+                        .extend_from_slice(buf);
+                }
+            });
+            Ok(buf.len())
+        })?;
+
+        handle.progress(true)?;
+        handle.progress_function(move |dl_total, dl_cur, _, _| {
+            tls::with(|downloads| match downloads {
+                Some(d) => d.progress(token, dl_total as u64, dl_cur as u64),
+                None => false,
+            })
+        })?;
+
+        // If the progress bar isn't enabled then it may be a while before the
+        // first crate finishes downloading so we inform immediately that we're
+        // downloading crates here.
+        if self.downloads_finished == 0
+            && self.pending.is_empty()
+            && !self.progress.borrow().as_ref().unwrap().is_enabled()
+        {
+            self.set
+                .config
+                .shell()
+                .status("Downloading", "crates ...")?;
+        }
+
+        let dl = Download {
+            token,
+            data: RefCell::new(Vec::new()),
+            id,
+            url,
+            descriptor,
+            total: Cell::new(0),
+            current: Cell::new(0),
+            start: Instant::now(),
+            timed_out: Cell::new(None),
+            retry: Retry::new(self.set.config)?,
+        };
+        self.enqueue(dl, handle)?;
+        self.tick(WhyTick::DownloadStarted)?;
+
+        Ok(None)
+    }
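`start` above prefers HTTP/2 but tolerates old system libcurls that can't provide it. A standalone sketch of the same "try, then fall back" shape, assuming the `curl` crate (error handling simplified; this is not the imported code itself):

```rust
use curl::easy::{Easy, HttpVersion};

fn configure(handle: &mut Easy, multiplexing: bool) -> Result<(), curl::Error> {
    handle.get(true)?;
    handle.follow_location(true)?; // follow redirects, as in `start`
    if multiplexing {
        // Prefer HTTP/2; an old libcurl may not support it, in which case we
        // log and carry on with whatever the handle defaults to.
        if let Err(e) = handle.http_version(HttpVersion::V2) {
            eprintln!("ignoring HTTP/2 setup error: {}", e);
        }
    } else {
        handle.http_version(HttpVersion::V11)?;
    }
    Ok(())
}
```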
+    /// Returns the number of crates that are still downloading.
+    pub fn remaining(&self) -> usize {
+        self.pending.len()
+    }
+
+    /// Blocks the current thread waiting for a package to finish downloading.
+    ///
+    /// This method will wait for a previously enqueued package to finish
+    /// downloading and return a reference to it after it's done downloading.
+    ///
+    /// # Panics
+    ///
+    /// This function will panic if there are no remaining downloads.
+    pub fn wait(&mut self) -> CargoResult<&'a Package> {
+        let (dl, data) = loop {
+            assert_eq!(self.pending.len(), self.pending_ids.len());
+            let (token, result) = self.wait_for_curl()?;
+            debug!("{} finished with {:?}", token, result);
+
+            let (mut dl, handle) = self
+                .pending
+                .remove(&token)
+                .expect("got a token for a non-in-progress transfer");
+            let data = mem::replace(&mut *dl.data.borrow_mut(), Vec::new());
+            let mut handle = self.set.multi.remove(handle)?;
+            self.pending_ids.remove(&dl.id);
+
+            // Check if this was a spurious error. If it was a spurious error
+            // then we want to re-enqueue our request for another attempt and
+            // then we wait for another request to finish.
+            let ret = {
+                let timed_out = &dl.timed_out;
+                let url = &dl.url;
+                dl.retry
+                    .r#try(|| {
+                        if let Err(e) = result {
+                            // If this error is "aborted by callback" then that's
+                            // probably because our progress callback aborted due to
+                            // a timeout. We'll find out by looking at the
+                            // `timed_out` field, looking for a descriptive message.
+                            // If one is found we switch the error code (to ensure
+                            // it's flagged as spurious) and then attach our extra
+                            // information to the error.
+ if !e.is_aborted_by_callback() { + return Err(e.into()); + } + + return Err(match timed_out.replace(None) { + Some(msg) => { + let code = curl_sys::CURLE_OPERATION_TIMEDOUT; + let mut err = curl::Error::new(code); + err.set_extra(msg); + err + } + None => e, + } + .into()); + } + + let code = handle.response_code()?; + if code != 200 && code != 0 { + let url = handle.effective_url()?.unwrap_or(url); + return Err(HttpNot200 { + code, + url: url.to_string(), + } + .into()); + } + Ok(()) + }) + .chain_err(|| format!("failed to download from `{}`", dl.url))? + }; + match ret { + Some(()) => break (dl, data), + None => { + self.pending_ids.insert(dl.id); + self.enqueue(dl, handle)? + } + } + }; + + // If the progress bar isn't enabled then we still want to provide some + // semblance of progress of how we're downloading crates, and if the + // progress bar is enabled this provides a good log of what's happening. + self.progress.borrow_mut().as_mut().unwrap().clear(); + self.set + .config + .shell() + .status("Downloaded", &dl.descriptor)?; + + self.downloads_finished += 1; + self.downloaded_bytes += dl.total.get(); + if dl.total.get() > self.largest.0 { + self.largest = (dl.total.get(), dl.id.name().to_string()); + } + + // We're about to synchronously extract the crate below. While we're + // doing that our download progress won't actually be updated, nor do we + // have a great view into the progress of the extraction. Let's prepare + // the user for this CPU-heavy step if it looks like it'll take some + // time to do so. + if dl.total.get() < ByteSize::kb(400).0 { + self.tick(WhyTick::DownloadFinished)?; + } else { + self.tick(WhyTick::Extracting(&dl.id.name()))?; + } + + // Inform the original source that the download is finished which + // should allow us to actually get the package and fill it in now. + let mut sources = self.set.sources.borrow_mut(); + let source = sources + .get_mut(dl.id.source_id()) + .ok_or_else(|| internal(format!("couldn't find source for `{}`", dl.id)))?; + let start = Instant::now(); + let pkg = source.finish_download(dl.id, data)?; + + // Assume that no time has passed while we were calling + // `finish_download`, update all speed checks and timeout limits of all + // active downloads to make sure they don't fire because of a slowly + // extracted tarball. + let finish_dur = start.elapsed(); + self.updated_at.set(self.updated_at.get() + finish_dur); + self.next_speed_check + .set(self.next_speed_check.get() + finish_dur); + + let slot = &self.set.packages[&dl.id]; + assert!(slot.fill(pkg).is_ok()); + Ok(slot.borrow().unwrap()) + } + + fn enqueue(&mut self, dl: Download<'cfg>, handle: Easy) -> CargoResult<()> { + let mut handle = self.set.multi.add(handle)?; + let now = Instant::now(); + handle.set_token(dl.token)?; + self.updated_at.set(now); + self.next_speed_check.set(now + self.timeout.dur); + self.next_speed_check_bytes_threshold + .set(u64::from(self.timeout.low_speed_limit)); + dl.timed_out.set(None); + dl.current.set(0); + dl.total.set(0); + self.pending.insert(dl.token, (dl, handle)); + Ok(()) + } + + fn wait_for_curl(&mut self) -> CargoResult<(usize, Result<(), curl::Error>)> { + // This is the main workhorse loop. We use libcurl's portable `wait` + // method to actually perform blocking. This isn't necessarily too + // efficient in terms of fd management, but we should only be juggling + // a few anyway. + // + // Here we start off by asking the `multi` handle to do some work via + // the `perform` method. 
This will actually do I/O work (non-blocking) + // and attempt to make progress. Afterwards we ask about the `messages` + // contained in the handle which will inform us if anything has finished + // transferring. + // + // If we've got a finished transfer after all that work we break out + // and process the finished transfer at the end. Otherwise we need to + // actually block waiting for I/O to happen, which we achieve with the + // `wait` method on `multi`. + loop { + let n = tls::set(self, || { + self.set + .multi + .perform() + .chain_err(|| "failed to perform http requests") + })?; + debug!("handles remaining: {}", n); + let results = &mut self.results; + let pending = &self.pending; + self.set.multi.messages(|msg| { + let token = msg.token().expect("failed to read token"); + let handle = &pending[&token].1; + if let Some(result) = msg.result_for(&handle) { + results.push((token, result)); + } else { + debug!("message without a result (?)"); + } + }); + + if let Some(pair) = results.pop() { + break Ok(pair); + } + assert!(!self.pending.is_empty()); + let timeout = self + .set + .multi + .get_timeout()? + .unwrap_or_else(|| Duration::new(5, 0)); + self.set + .multi + .wait(&mut [], timeout) + .chain_err(|| "failed to wait on curl `Multi`")?; + } + } + + fn progress(&self, token: usize, total: u64, cur: u64) -> bool { + let dl = &self.pending[&token].0; + dl.total.set(total); + let now = Instant::now(); + if cur != dl.current.get() { + let delta = cur - dl.current.get(); + let threshold = self.next_speed_check_bytes_threshold.get(); + + dl.current.set(cur); + self.updated_at.set(now); + + if delta >= threshold { + self.next_speed_check.set(now + self.timeout.dur); + self.next_speed_check_bytes_threshold + .set(u64::from(self.timeout.low_speed_limit)); + } else { + self.next_speed_check_bytes_threshold.set(threshold - delta); + } + } + if self.tick(WhyTick::DownloadUpdate).is_err() { + return false; + } + + // If we've spent too long not actually receiving any data we time out. + if now - self.updated_at.get() > self.timeout.dur { + self.updated_at.set(now); + let msg = format!( + "failed to download any data for `{}` within {}s", + dl.id, + self.timeout.dur.as_secs() + ); + dl.timed_out.set(Some(msg)); + return false; + } + + // If we reached the point in time that we need to check our speed + // limit, see if we've transferred enough data during this threshold. If + // it fails this check then we fail because the download is going too + // slowly. 
+        // slowly.
+        if now >= self.next_speed_check.get() {
+            self.next_speed_check.set(now + self.timeout.dur);
+            assert!(self.next_speed_check_bytes_threshold.get() > 0);
+            let msg = format!(
+                "download of `{}` failed to transfer more \
+                 than {} bytes in {}s",
+                dl.id,
+                self.timeout.low_speed_limit,
+                self.timeout.dur.as_secs()
+            );
+            dl.timed_out.set(Some(msg));
+            return false;
+        }
+
+        true
+    }
+
+    fn tick(&self, why: WhyTick<'_>) -> CargoResult<()> {
+        let mut progress = self.progress.borrow_mut();
+        let progress = progress.as_mut().unwrap();
+
+        if let WhyTick::DownloadUpdate = why {
+            if !progress.update_allowed() {
+                return Ok(());
+            }
+        }
+        let pending = self.pending.len();
+        let mut msg = if pending == 1 {
+            format!("{} crate", pending)
+        } else {
+            format!("{} crates", pending)
+        };
+        match why {
+            WhyTick::Extracting(krate) => {
+                msg.push_str(&format!(", extracting {} ...", krate));
+            }
+            _ => {
+                let mut dur = Duration::new(0, 0);
+                let mut remaining = 0;
+                for (dl, _) in self.pending.values() {
+                    dur += dl.start.elapsed();
+                    // If the total/current look weird just throw out the data
+                    // point, sounds like curl has more to learn before we have
+                    // the true information.
+                    if dl.total.get() >= dl.current.get() {
+                        remaining += dl.total.get() - dl.current.get();
+                    }
+                }
+                if remaining > 0 && dur > Duration::from_millis(500) {
+                    msg.push_str(&format!(", remaining bytes: {}", ByteSize(remaining)));
+                }
+            }
+        }
+        progress.print_now(&msg)
+    }
+}
+
+#[derive(Copy, Clone)]
+enum WhyTick<'a> {
+    DownloadStarted,
+    DownloadUpdate,
+    DownloadFinished,
+    Extracting(&'a str),
+}
+
+impl<'a, 'cfg> Drop for Downloads<'a, 'cfg> {
+    fn drop(&mut self) {
+        self.set.downloading.set(false);
+        let progress = self.progress.get_mut().take().unwrap();
+        // Don't print a download summary if we're not using a progress bar;
+        // we've already printed lots of `Downloading...` items.
+        if !progress.is_enabled() {
+            return;
+        }
+        // If we didn't download anything, no need for a summary.
+        if self.downloads_finished == 0 {
+            return;
+        }
+        // If an error happened, let's not clutter up the output.
+        if !self.success {
+            return;
+        }
+        let mut status = format!(
+            "{} crates ({}) in {}",
+            self.downloads_finished,
+            ByteSize(self.downloaded_bytes),
+            util::elapsed(self.start.elapsed())
+        );
+        if self.largest.0 > ByteSize::mb(1).0 {
+            status.push_str(&format!(
+                " (largest was `{}` at {})",
+                self.largest.1,
+                ByteSize(self.largest.0),
+            ));
+        }
+        // Clear progress before displaying final summary.
+        drop(progress);
+        drop(self.set.config.shell().status("Downloaded", status));
+    }
+}
+
+mod tls {
+    use std::cell::Cell;
+
+    use super::Downloads;
+
+    thread_local!(static PTR: Cell<usize> = Cell::new(0));
+
+    pub(crate) fn with<R>(f: impl FnOnce(Option<&Downloads<'_, '_>>) -> R) -> R {
+        let ptr = PTR.with(|p| p.get());
+        if ptr == 0 {
+            f(None)
+        } else {
+            unsafe { f(Some(&*(ptr as *const Downloads<'_, '_>))) }
+        }
+    }
+
+    pub(crate) fn set<R>(dl: &Downloads<'_, '_>, f: impl FnOnce() -> R) -> R {
+        struct Reset<'a, T: Copy>(&'a Cell<T>, T);
+
+        impl<'a, T: Copy> Drop for Reset<'a, T> {
+            fn drop(&mut self) {
+                self.0.set(self.1);
+            }
+        }
+
+        PTR.with(|p| {
+            let _reset = Reset(p, p.get());
+            p.set(dl as *const Downloads<'_, '_> as usize);
+            f()
+        })
+    }
+}
diff --git a/src/cargo/core/package_id.rs b/src/cargo/core/package_id.rs
new file mode 100644
index 000000000..11c86e900
--- /dev/null
+++ b/src/cargo/core/package_id.rs
@@ -0,0 +1,244 @@
+use std::collections::HashSet;
+use std::fmt::{self, Formatter};
+use std::hash;
+use std::hash::Hash;
+use std::path::Path;
+use std::ptr;
+use std::sync::Mutex;
+
+use semver;
+use serde::de;
+use serde::ser;
+
+use crate::core::interning::InternedString;
+use crate::core::source::SourceId;
+use crate::util::{CargoResult, ToSemver};
+
+lazy_static::lazy_static! {
+    static ref PACKAGE_ID_CACHE: Mutex<HashSet<&'static PackageIdInner>> =
+        Mutex::new(HashSet::new());
+}
+
+/// Identifier for a specific version of a package in a specific source.
+#[derive(Clone, Copy, Eq, PartialOrd, Ord)]
+pub struct PackageId {
+    inner: &'static PackageIdInner,
+}
+
+#[derive(PartialOrd, Eq, Ord)]
+struct PackageIdInner {
+    name: InternedString,
+    version: semver::Version,
+    source_id: SourceId,
+}
+
+// Custom equality that uses full equality of SourceId, rather than its custom equality.
+impl PartialEq for PackageIdInner {
+    fn eq(&self, other: &Self) -> bool {
+        self.name == other.name
+            && self.version == other.version
+            && self.source_id.full_eq(other.source_id)
+    }
+}
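`PackageId::pure` below interns `PackageIdInner` values: a global `HashSet` of `&'static` references, with `Box::leak` promoting each new entry. A minimal standalone sketch of that pattern over strings (hypothetical `intern` function; assumes the `lazy_static` crate, which this file also uses):

```rust
use std::collections::HashSet;
use std::sync::Mutex;

use lazy_static::lazy_static;

lazy_static! {
    static ref CACHE: Mutex<HashSet<&'static str>> = Mutex::new(HashSet::new());
}

// Returns a `&'static str`, allocating (and leaking) at most once per value.
fn intern(s: &str) -> &'static str {
    let mut cache = CACHE.lock().unwrap();
    if let Some(&cached) = cache.get(s) {
        return cached;
    }
    let leaked: &'static str = Box::leak(s.to_owned().into_boxed_str());
    cache.insert(leaked);
    leaked
}

fn main() {
    let a = intern("serde");
    let b = intern("serde");
    // Same leaked allocation, so pointer equality holds.
    assert!(std::ptr::eq(a, b));
}
```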
+// Custom hash that is coherent with the custom equality above.
+impl Hash for PackageIdInner {
+    fn hash<S: hash::Hasher>(&self, into: &mut S) {
+        self.name.hash(into);
+        self.version.hash(into);
+        self.source_id.full_hash(into);
+    }
+}
+
+impl ser::Serialize for PackageId {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+    where
+        S: ser::Serializer,
+    {
+        s.collect_str(&format_args!(
+            "{} {} ({})",
+            self.inner.name,
+            self.inner.version,
+            self.inner.source_id.to_url()
+        ))
+    }
+}
+
+impl<'de> de::Deserialize<'de> for PackageId {
+    fn deserialize<D>(d: D) -> Result<PackageId, D::Error>
+    where
+        D: de::Deserializer<'de>,
+    {
+        let string = String::deserialize(d)?;
+        let mut s = string.splitn(3, ' ');
+        let name = s.next().unwrap();
+        let name = InternedString::new(name);
+        let version = match s.next() {
+            Some(s) => s,
+            None => return Err(de::Error::custom("invalid serialized PackageId")),
+        };
+        let version = version.to_semver().map_err(de::Error::custom)?;
+        let url = match s.next() {
+            Some(s) => s,
+            None => return Err(de::Error::custom("invalid serialized PackageId")),
+        };
+        let url = if url.starts_with('(') && url.ends_with(')') {
+            &url[1..url.len() - 1]
+        } else {
+            return Err(de::Error::custom("invalid serialized PackageId"));
+        };
+        let source_id = SourceId::from_url(url).map_err(de::Error::custom)?;
+
+        Ok(PackageId::pure(name, version, source_id))
+    }
+}
+
+impl PartialEq for PackageId {
+    fn eq(&self, other: &PackageId) -> bool {
+        if ptr::eq(self.inner, other.inner) {
+            return true;
+        }
+        self.inner.name == other.inner.name
+            && self.inner.version == other.inner.version
+            && self.inner.source_id == other.inner.source_id
+    }
+}
+
+impl Hash for PackageId {
+    fn hash<S: hash::Hasher>(&self, state: &mut S) {
+        self.inner.name.hash(state);
+        self.inner.version.hash(state);
+        self.inner.source_id.hash(state);
+    }
+}
+
+impl PackageId {
+    pub fn new<T: ToSemver>(name: &str, version: T, sid: SourceId) -> CargoResult<PackageId> {
+        let v = version.to_semver()?;
+        Ok(PackageId::pure(InternedString::new(name), v, sid))
+    }
+
+    pub fn pure(name: InternedString, version: semver::Version, source_id: SourceId) -> PackageId {
+        let inner = PackageIdInner {
+            name,
+            version,
+            source_id,
+        };
+        let mut cache = PACKAGE_ID_CACHE.lock().unwrap();
+        let inner = cache.get(&inner).cloned().unwrap_or_else(|| {
+            let inner = Box::leak(Box::new(inner));
+            cache.insert(inner);
+            inner
+        });
+        PackageId { inner }
+    }
+
+    pub fn name(self) -> InternedString {
+        self.inner.name
+    }
+    pub fn version(self) -> &'static semver::Version {
+        &self.inner.version
+    }
+    pub fn source_id(self) -> SourceId {
+        self.inner.source_id
+    }
+
+    pub fn with_precise(self, precise: Option<String>) -> PackageId {
+        PackageId::pure(
+            self.inner.name,
+            self.inner.version.clone(),
+            self.inner.source_id.with_precise(precise),
+        )
+    }
+
+    pub fn with_source_id(self, source: SourceId) -> PackageId {
+        PackageId::pure(self.inner.name, self.inner.version.clone(), source)
+    }
+
+    pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Self {
+        if self.source_id() == to_replace {
+            self.with_source_id(replace_with)
+        } else {
+            self
+        }
+    }
+
+    pub fn stable_hash(self, workspace: &Path) -> PackageIdStableHash<'_> {
+        PackageIdStableHash(self, workspace)
+    }
+}
+
+pub struct PackageIdStableHash<'a>(PackageId, &'a Path);
+
+impl<'a> Hash for PackageIdStableHash<'a> {
+    fn hash<S: hash::Hasher>(&self, state: &mut S) {
+        self.0.inner.name.hash(state);
+        self.0.inner.version.hash(state);
+        self.0.inner.source_id.stable_hash(self.1, state);
+    }
+}
+
+impl fmt::Display for PackageId {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{} v{}", self.inner.name,
self.inner.version)?; + + if !self.inner.source_id.is_default_registry() { + write!(f, " ({})", self.inner.source_id)?; + } + + Ok(()) + } +} + +impl fmt::Debug for PackageId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("PackageId") + .field("name", &self.inner.name) + .field("version", &self.inner.version.to_string()) + .field("source", &self.inner.source_id.to_string()) + .finish() + } +} + +#[cfg(test)] +mod tests { + use super::PackageId; + use crate::core::source::SourceId; + use crate::sources::CRATES_IO_INDEX; + use crate::util::ToUrl; + + #[test] + fn invalid_version_handled_nicely() { + let loc = CRATES_IO_INDEX.to_url().unwrap(); + let repo = SourceId::for_registry(&loc).unwrap(); + + assert!(PackageId::new("foo", "1.0", repo).is_err()); + assert!(PackageId::new("foo", "1", repo).is_err()); + assert!(PackageId::new("foo", "bar", repo).is_err()); + assert!(PackageId::new("foo", "", repo).is_err()); + } + + #[test] + fn debug() { + let loc = CRATES_IO_INDEX.to_url().unwrap(); + let pkg_id = PackageId::new("foo", "1.0.0", SourceId::for_registry(&loc).unwrap()).unwrap(); + assert_eq!(r#"PackageId { name: "foo", version: "1.0.0", source: "registry `https://github.com/rust-lang/crates.io-index`" }"#, format!("{:?}", pkg_id)); + + let pretty = r#" +PackageId { + name: "foo", + version: "1.0.0", + source: "registry `https://github.com/rust-lang/crates.io-index`" +} +"# + .trim(); + assert_eq!(pretty, format!("{:#?}", pkg_id)); + } + + #[test] + fn display() { + let loc = CRATES_IO_INDEX.to_url().unwrap(); + let pkg_id = PackageId::new("foo", "1.0.0", SourceId::for_registry(&loc).unwrap()).unwrap(); + assert_eq!("foo v1.0.0", pkg_id.to_string()); + } +} diff --git a/src/cargo/core/package_id_spec.rs b/src/cargo/core/package_id_spec.rs new file mode 100644 index 000000000..fc7784ea0 --- /dev/null +++ b/src/cargo/core/package_id_spec.rs @@ -0,0 +1,375 @@ +use std::collections::HashMap; +use std::fmt; + +use semver::Version; +use serde::{de, ser}; +use url::Url; + +use crate::core::PackageId; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{validate_package_name, ToSemver, ToUrl}; + +/// Some or all of the data required to identify a package: +/// +/// 1. the package name (a `String`, required) +/// 2. the package version (a `Version`, optional) +/// 3. the package source (a `Url`, optional) +/// +/// If any of the optional fields are omitted, then the package ID may be ambiguous, there may be +/// more than one package/version/url combo that will match. However, often just the name is +/// sufficient to uniquely define a package ID. +#[derive(Clone, PartialEq, Eq, Debug, Hash, Ord, PartialOrd)] +pub struct PackageIdSpec { + name: String, + version: Option, + url: Option, +} + +impl PackageIdSpec { + /// Parses a spec string and returns a `PackageIdSpec` if the string was valid. 
+    ///
+    /// # Examples
+    /// Some examples of valid strings:
+    ///
+    /// ```
+    /// use cargo::core::PackageIdSpec;
+    ///
+    /// let specs = vec![
+    ///     "http://crates.io/foo#1.2.3",
+    ///     "http://crates.io/foo#bar:1.2.3",
+    ///     "crates.io/foo",
+    ///     "crates.io/foo#1.2.3",
+    ///     "crates.io/foo#bar",
+    ///     "crates.io/foo#bar:1.2.3",
+    ///     "foo",
+    ///     "foo:1.2.3",
+    /// ];
+    /// for spec in specs {
+    ///     assert!(PackageIdSpec::parse(spec).is_ok());
+    /// }
+    /// ```
+    pub fn parse(spec: &str) -> CargoResult<PackageIdSpec> {
+        if spec.contains('/') {
+            if let Ok(url) = spec.to_url() {
+                return PackageIdSpec::from_url(url);
+            }
+            if !spec.contains("://") {
+                if let Ok(url) = Url::parse(&format!("cargo://{}", spec)) {
+                    return PackageIdSpec::from_url(url);
+                }
+            }
+        }
+        let mut parts = spec.splitn(2, ':');
+        let name = parts.next().unwrap();
+        let version = match parts.next() {
+            Some(version) => Some(version.to_semver()?),
+            None => None,
+        };
+        validate_package_name(name, "pkgid", "")?;
+        Ok(PackageIdSpec {
+            name: name.to_string(),
+            version,
+            url: None,
+        })
+    }
+
+    /// Roughly equivalent to `PackageIdSpec::parse(spec)?.query(i)`.
+    pub fn query_str<I>(spec: &str, i: I) -> CargoResult<PackageId>
+    where
+        I: IntoIterator<Item = PackageId>,
+    {
+        let spec = PackageIdSpec::parse(spec)
+            .chain_err(|| failure::format_err!("invalid package ID specification: `{}`", spec))?;
+        spec.query(i)
+    }
+
+    /// Converts a `PackageId` to a `PackageIdSpec`, which will have both the `Version` and `Url`
+    /// fields filled in.
+    pub fn from_package_id(package_id: PackageId) -> PackageIdSpec {
+        PackageIdSpec {
+            name: package_id.name().to_string(),
+            version: Some(package_id.version().clone()),
+            url: Some(package_id.source_id().url().clone()),
+        }
+    }
+
+    /// Tries to convert a valid `Url` to a `PackageIdSpec`.
+    fn from_url(mut url: Url) -> CargoResult<PackageIdSpec> {
+        if url.query().is_some() {
+            failure::bail!("cannot have a query string in a pkgid: {}", url)
+        }
+        let frag = url.fragment().map(|s| s.to_owned());
+        url.set_fragment(None);
+        let (name, version) = {
+            let mut path = url
+                .path_segments()
+                .ok_or_else(|| failure::format_err!("pkgid urls must have a path: {}", url))?;
+            let path_name = path.next_back().ok_or_else(|| {
+                failure::format_err!(
+                    "pkgid urls must have at least one path \
+                     component: {}",
+                    url
+                )
+            })?;
+            match frag {
+                Some(fragment) => {
+                    let mut parts = fragment.splitn(2, ':');
+                    let name_or_version = parts.next().unwrap();
+                    match parts.next() {
+                        Some(part) => {
+                            let version = part.to_semver()?;
+                            (name_or_version.to_string(), Some(version))
+                        }
+                        None => {
+                            if name_or_version.chars().next().unwrap().is_alphabetic() {
+                                (name_or_version.to_string(), None)
+                            } else {
+                                let version = name_or_version.to_semver()?;
+                                (path_name.to_string(), Some(version))
+                            }
+                        }
+                    }
+                }
+                None => (path_name.to_string(), None),
+            }
+        };
+        Ok(PackageIdSpec {
+            name,
+            version,
+            url: Some(url),
+        })
+    }
+
+    pub fn name(&self) -> &str {
+        &self.name
+    }
+
+    pub fn version(&self) -> Option<&Version> {
+        self.version.as_ref()
+    }
+
+    pub fn url(&self) -> Option<&Url> {
+        self.url.as_ref()
+    }
+
+    pub fn set_url(&mut self, url: Url) {
+        self.url = Some(url);
+    }
+
+    /// Checks whether the given `PackageId` matches the `PackageIdSpec`.
+    pub fn matches(&self, package_id: PackageId) -> bool {
+        if self.name() != &*package_id.name() {
+            return false;
+        }
+
+        if let Some(ref v) = self.version {
+            if v != package_id.version() {
+                return false;
+            }
+        }
+
+        match self.url {
+            Some(ref u) => u == package_id.source_id().url(),
+            None => true,
+        }
+    }
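`matches` checks name, then version, then source URL, each only when the spec actually constrains it. A standalone sketch of the same narrowing logic over a hypothetical `Spec`/`Pkg` pair (illustrative, not the imported types):

```rust
struct Spec {
    name: String,
    version: Option<String>,
}

struct Pkg {
    name: String,
    version: String,
}

fn matches(spec: &Spec, pkg: &Pkg) -> bool {
    if spec.name != pkg.name {
        return false;
    }
    // Only constrain the version when the spec names one.
    match &spec.version {
        Some(v) => v == &pkg.version,
        None => true,
    }
}

fn main() {
    let pkg = Pkg { name: "foo".into(), version: "1.2.3".into() };
    assert!(matches(&Spec { name: "foo".into(), version: None }, &pkg));
    assert!(!matches(&Spec { name: "foo".into(), version: Some("1.2.2".into()) }, &pkg));
}
```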
+
+    /// Checks a list of `PackageId`s to find one that matches this
+    /// `PackageIdSpec`. If zero, two, or more are found, this returns an error.
+    pub fn query<I>(&self, i: I) -> CargoResult<PackageId>
+    where
+        I: IntoIterator<Item = PackageId>,
+    {
+        let mut ids = i.into_iter().filter(|p| self.matches(*p));
+        let ret = match ids.next() {
+            Some(id) => id,
+            None => failure::bail!(
+                "package ID specification `{}` \
+                 matched no packages",
+                self
+            ),
+        };
+        return match ids.next() {
+            Some(other) => {
+                let mut msg = format!(
+                    "There are multiple `{}` packages in \
+                     your project, and the specification \
+                     `{}` is ambiguous.\n\
+                     Please re-run this command \
+                     with `-p <spec>` where `<spec>` is one \
+                     of the following:",
+                    self.name(),
+                    self
+                );
+                let mut vec = vec![ret, other];
+                vec.extend(ids);
+                minimize(&mut msg, &vec, self);
+                Err(failure::format_err!("{}", msg))
+            }
+            None => Ok(ret),
+        };
+
+        fn minimize(msg: &mut String, ids: &[PackageId], spec: &PackageIdSpec) {
+            let mut version_cnt = HashMap::new();
+            for id in ids {
+                *version_cnt.entry(id.version()).or_insert(0) += 1;
+            }
+            for id in ids {
+                if version_cnt[id.version()] == 1 {
+                    msg.push_str(&format!("\n  {}:{}", spec.name(), id.version()));
+                } else {
+                    msg.push_str(&format!("\n  {}", PackageIdSpec::from_package_id(*id)));
+                }
+            }
+        }
+    }
+}
+
+impl fmt::Display for PackageIdSpec {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut printed_name = false;
+        match self.url {
+            Some(ref url) => {
+                if url.scheme() == "cargo" {
+                    write!(f, "{}{}", url.host().unwrap(), url.path())?;
+                } else {
+                    write!(f, "{}", url)?;
+                }
+                if url.path_segments().unwrap().next_back().unwrap() != self.name {
+                    printed_name = true;
+                    write!(f, "#{}", self.name)?;
+                }
+            }
+            None => {
+                printed_name = true;
+                write!(f, "{}", self.name)?
+ } + } + if let Some(ref v) = self.version { + write!(f, "{}{}", if printed_name { ":" } else { "#" }, v)?; + } + Ok(()) + } +} + +impl ser::Serialize for PackageIdSpec { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + self.to_string().serialize(s) + } +} + +impl<'de> de::Deserialize<'de> for PackageIdSpec { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + PackageIdSpec::parse(&string).map_err(de::Error::custom) + } +} + +#[cfg(test)] +mod tests { + use super::PackageIdSpec; + use crate::core::{PackageId, SourceId}; + use crate::util::ToSemver; + use url::Url; + + #[test] + fn good_parsing() { + fn ok(spec: &str, expected: PackageIdSpec) { + let parsed = PackageIdSpec::parse(spec).unwrap(); + assert_eq!(parsed, expected); + assert_eq!(parsed.to_string(), spec); + } + + ok( + "http://crates.io/foo#1.2.3", + PackageIdSpec { + name: "foo".to_string(), + version: Some("1.2.3".to_semver().unwrap()), + url: Some(Url::parse("http://crates.io/foo").unwrap()), + }, + ); + ok( + "http://crates.io/foo#bar:1.2.3", + PackageIdSpec { + name: "bar".to_string(), + version: Some("1.2.3".to_semver().unwrap()), + url: Some(Url::parse("http://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo", + PackageIdSpec { + name: "foo".to_string(), + version: None, + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo#1.2.3", + PackageIdSpec { + name: "foo".to_string(), + version: Some("1.2.3".to_semver().unwrap()), + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo#bar", + PackageIdSpec { + name: "bar".to_string(), + version: None, + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "crates.io/foo#bar:1.2.3", + PackageIdSpec { + name: "bar".to_string(), + version: Some("1.2.3".to_semver().unwrap()), + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }, + ); + ok( + "foo", + PackageIdSpec { + name: "foo".to_string(), + version: None, + url: None, + }, + ); + ok( + "foo:1.2.3", + PackageIdSpec { + name: "foo".to_string(), + version: Some("1.2.3".to_semver().unwrap()), + url: None, + }, + ); + } + + #[test] + fn bad_parsing() { + assert!(PackageIdSpec::parse("baz:").is_err()); + assert!(PackageIdSpec::parse("baz:*").is_err()); + assert!(PackageIdSpec::parse("baz:1.0").is_err()); + assert!(PackageIdSpec::parse("http://baz:1.0").is_err()); + assert!(PackageIdSpec::parse("http://#baz:1.0").is_err()); + } + + #[test] + fn matching() { + let url = Url::parse("http://example.com").unwrap(); + let sid = SourceId::for_registry(&url).unwrap(); + let foo = PackageId::new("foo", "1.2.3", sid).unwrap(); + let bar = PackageId::new("bar", "1.2.3", sid).unwrap(); + + assert!(PackageIdSpec::parse("foo").unwrap().matches(foo)); + assert!(!PackageIdSpec::parse("foo").unwrap().matches(bar)); + assert!(PackageIdSpec::parse("foo:1.2.3").unwrap().matches(foo)); + assert!(!PackageIdSpec::parse("foo:1.2.2").unwrap().matches(foo)); + } +} diff --git a/src/cargo/core/profiles.rs b/src/cargo/core/profiles.rs new file mode 100644 index 000000000..18355b9b4 --- /dev/null +++ b/src/cargo/core/profiles.rs @@ -0,0 +1,680 @@ +use std::collections::HashSet; +use std::{cmp, env, fmt, hash}; + +use serde::Deserialize; + +use crate::core::compiler::CompileMode; +use crate::core::interning::InternedString; +use crate::core::{Features, PackageId, PackageIdSpec, PackageSet, Shell}; +use crate::util::errors::CargoResultExt; +use 
crate::util::lev_distance::lev_distance; +use crate::util::toml::{ProfilePackageSpec, StringOrBool, TomlProfile, TomlProfiles, U32OrBool}; +use crate::util::{CargoResult, Config}; + +/// Collection of all user profiles. +#[derive(Clone, Debug)] +pub struct Profiles { + dev: ProfileMaker, + release: ProfileMaker, + test: ProfileMaker, + bench: ProfileMaker, + doc: ProfileMaker, + /// Incremental compilation can be overridden globally via: + /// - `CARGO_INCREMENTAL` environment variable. + /// - `build.incremental` config value. + incremental: Option, +} + +impl Profiles { + pub fn new( + profiles: Option<&TomlProfiles>, + config: &Config, + features: &Features, + warnings: &mut Vec, + ) -> CargoResult { + if let Some(profiles) = profiles { + profiles.validate(features, warnings)?; + } + + let config_profiles = config.profiles()?; + + let incremental = match env::var_os("CARGO_INCREMENTAL") { + Some(v) => Some(v == "1"), + None => config.get::>("build.incremental")?, + }; + + Ok(Profiles { + dev: ProfileMaker { + default: Profile::default_dev(), + toml: profiles.and_then(|p| p.dev.clone()), + config: config_profiles.dev.clone(), + }, + release: ProfileMaker { + default: Profile::default_release(), + toml: profiles.and_then(|p| p.release.clone()), + config: config_profiles.release.clone(), + }, + test: ProfileMaker { + default: Profile::default_test(), + toml: profiles.and_then(|p| p.test.clone()), + config: None, + }, + bench: ProfileMaker { + default: Profile::default_bench(), + toml: profiles.and_then(|p| p.bench.clone()), + config: None, + }, + doc: ProfileMaker { + default: Profile::default_doc(), + toml: profiles.and_then(|p| p.doc.clone()), + config: None, + }, + incremental, + }) + } + + /// Retrieves the profile for a target. + /// `is_member` is whether or not this package is a member of the + /// workspace. + pub fn get_profile( + &self, + pkg_id: PackageId, + is_member: bool, + unit_for: UnitFor, + mode: CompileMode, + release: bool, + ) -> Profile { + let maker = match mode { + CompileMode::Test | CompileMode::Bench => { + if release { + &self.bench + } else { + &self.test + } + } + CompileMode::Build + | CompileMode::Check { .. } + | CompileMode::Doctest + | CompileMode::RunCustomBuild => { + // Note: `RunCustomBuild` doesn't normally use this code path. + // `build_unit_profiles` normally ensures that it selects the + // ancestor's profile. However, `cargo clean -p` can hit this + // path. + if release { + &self.release + } else { + &self.dev + } + } + CompileMode::Doc { .. } => &self.doc, + }; + let mut profile = maker.get_profile(Some(pkg_id), is_member, unit_for); + // `panic` should not be set for tests/benches, or any of their + // dependencies. + if !unit_for.is_panic_ok() || mode.is_any_test() { + profile.panic = None; + } + + // Incremental can be globally overridden. + if let Some(v) = self.incremental { + profile.incremental = v; + } + // Only enable incremental compilation for sources the user can + // modify (aka path sources). For things that change infrequently, + // non-incremental builds yield better performance in the compiler + // itself (aka crates.io / git dependencies) + // + // (see also https://github.com/rust-lang/cargo/issues/3972) + if !pkg_id.source_id().is_path() { + profile.incremental = false; + } + profile + } + + /// The profile for *running* a `build.rs` script is only used for setting + /// a few environment variables. 
To ensure proper de-duplication of the
+    /// running `Unit`, this uses a stripped-down profile (so that unrelated
+    /// profile flags don't cause `build.rs` to needlessly run multiple
+    /// times).
+    pub fn get_profile_run_custom_build(&self, for_unit_profile: &Profile) -> Profile {
+        let mut result = Profile::default();
+        result.debuginfo = for_unit_profile.debuginfo;
+        result.opt_level = for_unit_profile.opt_level;
+        result
+    }
+
+    /// This returns a generic base profile. This is currently used for the
+    /// `[Finished]` line. It is not entirely accurate, since it doesn't
+    /// select for the package that was actually built.
+    pub fn base_profile(&self, release: bool) -> Profile {
+        if release {
+            self.release.get_profile(None, true, UnitFor::new_normal())
+        } else {
+            self.dev.get_profile(None, true, UnitFor::new_normal())
+        }
+    }
+
+    /// Used to check for overrides for non-existing packages.
+    pub fn validate_packages(
+        &self,
+        shell: &mut Shell,
+        packages: &PackageSet<'_>,
+    ) -> CargoResult<()> {
+        self.dev.validate_packages(shell, packages)?;
+        self.release.validate_packages(shell, packages)?;
+        self.test.validate_packages(shell, packages)?;
+        self.bench.validate_packages(shell, packages)?;
+        self.doc.validate_packages(shell, packages)?;
+        Ok(())
+    }
+}
+
+/// An object used for handling the profile override hierarchy.
+///
+/// The precedence of profiles is (first one wins):
+/// - Profiles in `.cargo/config` files (using same order as below).
+/// - `[profile.dev.overrides.name]` -- a named package.
+/// - `[profile.dev.overrides."*"]` -- this cannot apply to workspace members.
+/// - `[profile.dev.build-override]` -- this can only apply to `build.rs` scripts
+///   and their dependencies.
+/// - `[profile.dev]`
+/// - Default (hard-coded) values.
+#[derive(Debug, Clone)]
+struct ProfileMaker {
+    /// The starting, hard-coded defaults for the profile.
+    default: Profile,
+    /// The profile from the `Cargo.toml` manifest.
+    toml: Option<TomlProfile>,
+    /// Profile loaded from `.cargo/config` files.
+    config: Option<TomlProfile>,
+}
+
+impl ProfileMaker {
+    fn get_profile(
+        &self,
+        pkg_id: Option<PackageId>,
+        is_member: bool,
+        unit_for: UnitFor,
+    ) -> Profile {
+        let mut profile = self.default;
+        if let Some(ref toml) = self.toml {
+            merge_toml(pkg_id, is_member, unit_for, &mut profile, toml);
+        }
+        if let Some(ref toml) = self.config {
+            merge_toml(pkg_id, is_member, unit_for, &mut profile, toml);
+        }
+        profile
+    }
+
+    fn validate_packages(&self, shell: &mut Shell, packages: &PackageSet<'_>) -> CargoResult<()> {
+        self.validate_packages_toml(shell, packages, &self.toml, true)?;
+        self.validate_packages_toml(shell, packages, &self.config, false)?;
+        Ok(())
+    }
+
+    fn validate_packages_toml(
+        &self,
+        shell: &mut Shell,
+        packages: &PackageSet<'_>,
+        toml: &Option<TomlProfile>,
+        warn_unmatched: bool,
+    ) -> CargoResult<()> {
+        let toml = match *toml {
+            Some(ref toml) => toml,
+            None => return Ok(()),
+        };
+        let overrides = match toml.overrides {
+            Some(ref overrides) => overrides,
+            None => return Ok(()),
+        };
+        // Verify that a package doesn't match multiple spec overrides.
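The loop that follows implements this check; as a standalone illustration of why it exists (hypothetical override specs, counting matches per package, not part of the patch):

```rust
fn main() {
    // Two override specs that both match one package name would make the
    // merge order ambiguous, so Cargo rejects that configuration.
    let specs = ["serde", "serde"];
    let package = "serde";

    let hits = specs.iter().filter(|&&s| s == package).count();
    if hits > 1 {
        eprintln!("multiple profile overrides match package `{}`", package);
    }
}
```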
+ let mut found = HashSet::new(); + for pkg_id in packages.package_ids() { + let matches: Vec<&PackageIdSpec> = overrides + .keys() + .filter_map(|key| match *key { + ProfilePackageSpec::All => None, + ProfilePackageSpec::Spec(ref spec) => { + if spec.matches(pkg_id) { + Some(spec) + } else { + None + } + } + }) + .collect(); + match matches.len() { + 0 => {} + 1 => { + found.insert(matches[0].clone()); + } + _ => { + let specs = matches + .iter() + .map(|spec| spec.to_string()) + .collect::>() + .join(", "); + failure::bail!( + "multiple profile overrides in profile `{}` match package `{}`\n\ + found profile override specs: {}", + self.default.name, + pkg_id, + specs + ); + } + } + } + + if !warn_unmatched { + return Ok(()); + } + // Verify every override matches at least one package. + let missing_specs = overrides.keys().filter_map(|key| { + if let ProfilePackageSpec::Spec(ref spec) = *key { + if !found.contains(spec) { + return Some(spec); + } + } + None + }); + for spec in missing_specs { + // See if there is an exact name match. + let name_matches: Vec = packages + .package_ids() + .filter_map(|pkg_id| { + if pkg_id.name().as_str() == spec.name() { + Some(pkg_id.to_string()) + } else { + None + } + }) + .collect(); + if name_matches.is_empty() { + let suggestion = packages + .package_ids() + .map(|p| (lev_distance(spec.name(), &p.name()), p.name())) + .filter(|&(d, _)| d < 4) + .min_by_key(|p| p.0) + .map(|p| p.1); + match suggestion { + Some(p) => shell.warn(format!( + "profile override spec `{}` did not match any packages\n\n\ + Did you mean `{}`?", + spec, p + ))?, + None => shell.warn(format!( + "profile override spec `{}` did not match any packages", + spec + ))?, + } + } else { + shell.warn(format!( + "version or URL in profile override spec `{}` does not \ + match any of the packages: {}", + spec, + name_matches.join(", ") + ))?; + } + } + Ok(()) + } +} + +fn merge_toml( + pkg_id: Option, + is_member: bool, + unit_for: UnitFor, + profile: &mut Profile, + toml: &TomlProfile, +) { + merge_profile(profile, toml); + if unit_for.is_custom_build() { + if let Some(ref build_override) = toml.build_override { + merge_profile(profile, build_override); + } + } + if let Some(ref overrides) = toml.overrides { + if !is_member { + if let Some(all) = overrides.get(&ProfilePackageSpec::All) { + merge_profile(profile, all); + } + } + if let Some(pkg_id) = pkg_id { + let mut matches = overrides + .iter() + .filter_map(|(key, spec_profile)| match *key { + ProfilePackageSpec::All => None, + ProfilePackageSpec::Spec(ref s) => { + if s.matches(pkg_id) { + Some(spec_profile) + } else { + None + } + } + }); + if let Some(spec_profile) = matches.next() { + merge_profile(profile, spec_profile); + // `validate_packages` should ensure that there are + // no additional matches. 
+                assert!(
+                    matches.next().is_none(),
+                    "package `{}` matched multiple profile overrides",
+                    pkg_id
+                );
+            }
+        }
+    }
+}
+
+fn merge_profile(profile: &mut Profile, toml: &TomlProfile) {
+    if let Some(ref opt_level) = toml.opt_level {
+        profile.opt_level = InternedString::new(&opt_level.0);
+    }
+    match toml.lto {
+        Some(StringOrBool::Bool(b)) => profile.lto = Lto::Bool(b),
+        Some(StringOrBool::String(ref n)) => profile.lto = Lto::Named(InternedString::new(n)),
+        None => {}
+    }
+    if toml.codegen_units.is_some() {
+        profile.codegen_units = toml.codegen_units;
+    }
+    match toml.debug {
+        Some(U32OrBool::U32(debug)) => profile.debuginfo = Some(debug),
+        Some(U32OrBool::Bool(true)) => profile.debuginfo = Some(2),
+        Some(U32OrBool::Bool(false)) => profile.debuginfo = None,
+        None => {}
+    }
+    if let Some(debug_assertions) = toml.debug_assertions {
+        profile.debug_assertions = debug_assertions;
+    }
+    if let Some(rpath) = toml.rpath {
+        profile.rpath = rpath;
+    }
+    if let Some(ref panic) = toml.panic {
+        profile.panic = Some(InternedString::new(panic));
+    }
+    if let Some(overflow_checks) = toml.overflow_checks {
+        profile.overflow_checks = overflow_checks;
+    }
+    if let Some(incremental) = toml.incremental {
+        profile.incremental = incremental;
+    }
+}
+
+/// Profile settings used to determine which compiler flags to use for a
+/// target.
+#[derive(Clone, Copy, Eq, PartialOrd, Ord)]
+pub struct Profile {
+    pub name: &'static str,
+    pub opt_level: InternedString,
+    pub lto: Lto,
+    // `None` means use rustc default.
+    pub codegen_units: Option<u32>,
+    pub debuginfo: Option<u32>,
+    pub debug_assertions: bool,
+    pub overflow_checks: bool,
+    pub rpath: bool,
+    pub incremental: bool,
+    pub panic: Option<InternedString>,
+}
+
+impl Default for Profile {
+    fn default() -> Profile {
+        Profile {
+            name: "",
+            opt_level: InternedString::new("0"),
+            lto: Lto::Bool(false),
+            codegen_units: None,
+            debuginfo: None,
+            debug_assertions: false,
+            overflow_checks: false,
+            rpath: false,
+            incremental: false,
+            panic: None,
+        }
+    }
+}
+
+compact_debug! {
+    impl fmt::Debug for Profile {
+        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+            let (default, default_name) = match self.name {
+                "dev" => (Profile::default_dev(), "default_dev()"),
+                "release" => (Profile::default_release(), "default_release()"),
+                "test" => (Profile::default_test(), "default_test()"),
+                "bench" => (Profile::default_bench(), "default_bench()"),
+                "doc" => (Profile::default_doc(), "default_doc()"),
+                _ => (Profile::default(), "default()"),
+            };
+            [debug_the_fields(
+                name
+                opt_level
+                lto
+                codegen_units
+                debuginfo
+                debug_assertions
+                overflow_checks
+                rpath
+                incremental
+                panic
+            )]
+        }
+    }
+}
+
+impl fmt::Display for Profile {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Profile({})", self.name)
+    }
+}
+
+impl hash::Hash for Profile {
+    fn hash<H>(&self, state: &mut H)
+    where
+        H: hash::Hasher,
+    {
+        self.comparable().hash(state);
+    }
+}
+
+impl cmp::PartialEq for Profile {
+    fn eq(&self, other: &Self) -> bool {
+        self.comparable() == other.comparable()
+    }
+}
+
+impl Profile {
+    fn default_dev() -> Profile {
+        Profile {
+            name: "dev",
+            debuginfo: Some(2),
+            debug_assertions: true,
+            overflow_checks: true,
+            incremental: true,
+            ..Profile::default()
+        }
+    }
+
+    fn default_release() -> Profile {
+        Profile {
+            name: "release",
+            opt_level: InternedString::new("3"),
+            ..Profile::default()
+        }
+    }
+
+    fn default_test() -> Profile {
+        Profile {
+            name: "test",
+            ..Profile::default_dev()
+        }
+    }
+
+    fn default_bench() -> Profile {
+        Profile {
+            name: "bench",
+            ..Profile::default_release()
+        }
+    }
+
+    fn default_doc() -> Profile {
+        Profile {
+            name: "doc",
+            ..Profile::default_dev()
+        }
+    }
+
+    /// Compares all fields except `name`, which doesn't affect compilation.
+    /// This is necessary for `Unit` deduplication for things like "test" and
+    /// "dev" which are essentially the same.
+    fn comparable(
+        &self,
+    ) -> (
+        &InternedString,
+        &Lto,
+        &Option<u32>,
+        &Option<u32>,
+        &bool,
+        &bool,
+        &bool,
+        &bool,
+        &Option<InternedString>,
+    ) {
+        (
+            &self.opt_level,
+            &self.lto,
+            &self.codegen_units,
+            &self.debuginfo,
+            &self.debug_assertions,
+            &self.overflow_checks,
+            &self.rpath,
+            &self.incremental,
+            &self.panic,
+        )
+    }
+}
+
+/// The link-time-optimization setting.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, PartialOrd, Ord)]
+pub enum Lto {
+    /// False = no LTO
+    /// True = "Fat" LTO
+    Bool(bool),
+    /// Named LTO settings like "thin".
+    Named(InternedString),
+}
+
+/// Flags used in creating `Unit`s to indicate the purpose for the target, and
+/// to ensure the target's dependencies have the correct settings.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
+pub struct UnitFor {
+    /// A target for `build.rs` or any of its dependencies. This enables
+    /// `build-override` profiles for these targets.
+    custom_build: bool,
+    /// This is true if it is *allowed* to set the `panic` flag. Currently
+    /// this is false for test/bench targets and all their dependencies, and
+    /// "for_host" units such as proc macros and custom build scripts and their
+    /// dependencies.
+    panic_ok: bool,
+}
+
+impl UnitFor {
+    /// A unit for a normal target/dependency (i.e., not custom build,
+    /// proc macro/plugin, or test/bench).
+    pub fn new_normal() -> UnitFor {
+        UnitFor {
+            custom_build: false,
+            panic_ok: true,
+        }
+    }
+
+    /// A unit for a custom build script or its dependencies.
+    pub fn new_build() -> UnitFor {
+        UnitFor {
+            custom_build: true,
+            panic_ok: false,
+        }
+    }
+
+    /// A unit for a proc macro or compiler plugin or their dependencies.
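These constructors interact with `with_for_host` (defined just below): once a unit is built for the host, `panic_ok` is cleared and stays cleared for everything derived from it. A small standalone sketch of that sticky-flag behavior, mirroring the fields in this file:

```rust
#[derive(Copy, Clone)]
struct UnitFor {
    custom_build: bool,
    panic_ok: bool,
}

impl UnitFor {
    fn new_normal() -> UnitFor {
        UnitFor { custom_build: false, panic_ok: true }
    }
    fn with_for_host(self, for_host: bool) -> UnitFor {
        UnitFor {
            custom_build: self.custom_build,
            // `&& !for_host` makes the flag sticky: once false, always false.
            panic_ok: self.panic_ok && !for_host,
        }
    }
}

fn main() {
    let proc_macro_dep = UnitFor::new_normal().with_for_host(true);
    // Even a later non-host hop can't restore `panic_ok`.
    assert!(!proc_macro_dep.with_for_host(false).panic_ok);
}
```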
+    pub fn new_compiler() -> UnitFor {
+        UnitFor {
+            custom_build: false,
+            panic_ok: false,
+        }
+    }
+
+    /// A unit for a test/bench target or their dependencies.
+    pub fn new_test() -> UnitFor {
+        UnitFor {
+            custom_build: false,
+            panic_ok: false,
+        }
+    }
+
+    /// Creates a variant based on `for_host` setting.
+    ///
+    /// When `for_host` is true, this clears `panic_ok` in a sticky fashion so
+    /// that all its dependencies also have `panic_ok=false`.
+    pub fn with_for_host(self, for_host: bool) -> UnitFor {
+        UnitFor {
+            custom_build: self.custom_build,
+            panic_ok: self.panic_ok && !for_host,
+        }
+    }
+
+    /// Returns `true` if this unit is for a custom build script or one of its
+    /// dependencies.
+    pub fn is_custom_build(self) -> bool {
+        self.custom_build
+    }
+
+    /// Returns `true` if this unit is allowed to set the `panic` compiler flag.
+    pub fn is_panic_ok(self) -> bool {
+        self.panic_ok
+    }
+
+    /// All possible values, used by `clean`.
+    pub fn all_values() -> &'static [UnitFor] {
+        static ALL: [UnitFor; 3] = [
+            UnitFor {
+                custom_build: false,
+                panic_ok: true,
+            },
+            UnitFor {
+                custom_build: true,
+                panic_ok: false,
+            },
+            UnitFor {
+                custom_build: false,
+                panic_ok: false,
+            },
+        ];
+        &ALL
+    }
+}
+
+/// Profiles loaded from `.cargo/config` files.
+#[derive(Clone, Debug, Deserialize, Default)]
+pub struct ConfigProfiles {
+    dev: Option<TomlProfile>,
+    release: Option<TomlProfile>,
+}
+
+impl ConfigProfiles {
+    pub fn validate(&self, features: &Features, warnings: &mut Vec<String>) -> CargoResult<()> {
+        if let Some(ref profile) = self.dev {
+            profile
+                .validate("dev", features, warnings)
+                .chain_err(|| failure::format_err!("config profile `profile.dev` is not valid"))?;
+        }
+        if let Some(ref profile) = self.release {
+            profile
+                .validate("release", features, warnings)
+                .chain_err(|| {
+                    failure::format_err!("config profile `profile.release` is not valid")
+                })?;
+        }
+        Ok(())
+    }
+}
diff --git a/src/cargo/core/registry.rs b/src/cargo/core/registry.rs
new file mode 100644
index 000000000..acac1266f
--- /dev/null
+++ b/src/cargo/core/registry.rs
@@ -0,0 +1,668 @@
+use std::collections::{HashMap, HashSet};
+
+use log::{debug, trace};
+use semver::VersionReq;
+use url::Url;
+
+use crate::core::PackageSet;
+use crate::core::{Dependency, PackageId, Source, SourceId, SourceMap, Summary};
+use crate::sources::config::SourceConfigMap;
+use crate::util::errors::{CargoResult, CargoResultExt};
+use crate::util::{profile, Config};
+
+/// Source of information about a group of packages.
+///
+/// See also `core::Source`.
+pub trait Registry {
+    /// Attempt to find the packages that match a dependency request.
+    fn query(
+        &mut self,
+        dep: &Dependency,
+        f: &mut dyn FnMut(Summary),
+        fuzzy: bool,
+    ) -> CargoResult<()>;
+
+    fn query_vec(&mut self, dep: &Dependency, fuzzy: bool) -> CargoResult<Vec<Summary>> {
+        let mut ret = Vec::new();
+        self.query(dep, &mut |s| ret.push(s), fuzzy)?;
+        Ok(ret)
+    }
+
+    fn describe_source(&self, source: SourceId) -> String;
+    fn is_replaced(&self, source: SourceId) -> bool;
+}
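The `query_vec` default method above adapts the callback-based `query` into a collecting call. A standalone sketch of that adapter shape over a hypothetical `MiniRegistry` trait (illustrative only):

```rust
trait MiniRegistry {
    // Callback-based lookup, mirroring `Registry::query` above.
    fn query(&mut self, dep: &str, f: &mut dyn FnMut(String));

    // Collecting adapter, mirroring `Registry::query_vec`.
    fn query_vec(&mut self, dep: &str) -> Vec<String> {
        let mut ret = Vec::new();
        self.query(dep, &mut |s| ret.push(s));
        ret
    }
}

struct Fixed;

impl MiniRegistry for Fixed {
    fn query(&mut self, dep: &str, f: &mut dyn FnMut(String)) {
        // Pretend exactly one summary matches the request.
        f(format!("{} 1.0.0", dep));
    }
}

fn main() {
    let mut r = Fixed;
    assert_eq!(r.query_vec("serde"), vec!["serde 1.0.0".to_string()]);
}
```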
+///
+/// The general idea behind this registry is that it is centered around the
+/// `SourceMap` structure, contained within which is a mapping of a `SourceId` to
+/// a `Source`. Each `Source` in the map has been updated (using network
+/// operations if necessary) and is ready to be queried for packages.
+pub struct PackageRegistry<'cfg> {
+    config: &'cfg Config,
+    sources: SourceMap<'cfg>,
+
+    // A list of sources which are considered "overrides" which take precedence
+    // when querying for packages.
+    overrides: Vec<SourceId>,
+
+    // Note that each SourceId does not take into account its `precise` field
+    // when hashing or testing for equality. When adding a new `SourceId`, we
+    // want to avoid duplicates in the `SourceMap` (to prevent re-updating the
+    // same git repo twice for example), but we also want to ensure that the
+    // loaded source is always updated.
+    //
+    // Sources with a `precise` field normally don't need to be updated because
+    // their contents are already on disk, but sources without a `precise` field
+    // almost always need to be updated. If we have a cached `Source` for a
+    // precise `SourceId`, then when we add a new `SourceId` that is not precise
+    // we want to ensure that the underlying source is updated.
+    //
+    // This is basically a long-winded way of saying that we want to know
+    // precisely what the keys of `sources` are, so this is a mapping of key to
+    // what exactly the key is.
+    source_ids: HashMap<SourceId, (SourceId, Kind)>,
+
+    locked: LockedMap,
+    yanked_whitelist: HashSet<PackageId>,
+    source_config: SourceConfigMap<'cfg>,
+
+    patches: HashMap<Url, Vec<Summary>>,
+    patches_locked: bool,
+    patches_available: HashMap<Url, Vec<PackageId>>,
+}
+
+type LockedMap = HashMap<SourceId, HashMap<String, Vec<(PackageId, Vec<PackageId>)>>>;
+
+#[derive(PartialEq, Eq, Clone, Copy)]
+enum Kind {
+    Override,
+    Locked,
+    Normal,
+}
+
+impl<'cfg> PackageRegistry<'cfg> {
+    pub fn new(config: &'cfg Config) -> CargoResult<PackageRegistry<'cfg>> {
+        let source_config = SourceConfigMap::new(config)?;
+        Ok(PackageRegistry {
+            config,
+            sources: SourceMap::new(),
+            source_ids: HashMap::new(),
+            overrides: Vec::new(),
+            source_config,
+            locked: HashMap::new(),
+            yanked_whitelist: HashSet::new(),
+            patches: HashMap::new(),
+            patches_locked: false,
+            patches_available: HashMap::new(),
+        })
+    }
+
+    pub fn get(self, package_ids: &[PackageId]) -> CargoResult<PackageSet<'cfg>> {
+        trace!("getting packages; sources={}", self.sources.len());
+        PackageSet::new(package_ids, self.sources, self.config)
+    }
+
+    fn ensure_loaded(&mut self, namespace: SourceId, kind: Kind) -> CargoResult<()> {
+        match self.source_ids.get(&namespace) {
+            // We've previously loaded this source, and we've already locked it,
+            // so we're not allowed to change it even if `namespace` has a
+            // slightly different precise version listed.
+            Some(&(_, Kind::Locked)) => {
+                debug!("load/locked {}", namespace);
+                return Ok(());
+            }
+
+            // If the previous source was not a precise source, then we can be
+            // sure that it's already been updated if we've already loaded it.
+            Some(&(ref previous, _)) if previous.precise().is_none() => {
+                debug!("load/precise {}", namespace);
+                return Ok(());
+            }
+
+            // If the previous source has the same precise version as we do,
+            // then we're done, otherwise we need to move forward
+            // updating this source.
+ Some(&(ref previous, _)) => { + if previous.precise() == namespace.precise() { + debug!("load/match {}", namespace); + return Ok(()); + } + debug!("load/mismatch {}", namespace); + } + None => { + debug!("load/missing {}", namespace); + } + } + + self.load(namespace, kind)?; + Ok(()) + } + + pub fn add_sources(&mut self, ids: impl IntoIterator) -> CargoResult<()> { + for id in ids { + self.ensure_loaded(id, Kind::Locked)?; + } + Ok(()) + } + + pub fn add_preloaded(&mut self, source: Box) { + self.add_source(source, Kind::Locked); + } + + fn add_source(&mut self, source: Box, kind: Kind) { + let id = source.source_id(); + self.sources.insert(source); + self.source_ids.insert(id, (id, kind)); + } + + pub fn add_override(&mut self, source: Box) { + self.overrides.push(source.source_id()); + self.add_source(source, Kind::Override); + } + + pub fn add_to_yanked_whitelist(&mut self, iter: impl Iterator) { + let pkgs = iter.collect::>(); + for (_, source) in self.sources.sources_mut() { + source.add_to_yanked_whitelist(&pkgs); + } + self.yanked_whitelist.extend(pkgs); + } + + pub fn register_lock(&mut self, id: PackageId, deps: Vec) { + trace!("register_lock: {}", id); + for dep in deps.iter() { + trace!("\t-> {}", dep); + } + let sub_map = self + .locked + .entry(id.source_id()) + .or_insert_with(HashMap::new); + let sub_vec = sub_map + .entry(id.name().to_string()) + .or_insert_with(Vec::new); + sub_vec.push((id, deps)); + } + + /// Insert a `[patch]` section into this registry. + /// + /// This method will insert a `[patch]` section for the `url` specified, + /// with the given list of dependencies. The `url` specified is the URL of + /// the source to patch (for example this is `crates-io` in the manifest). + /// The `deps` is an array of all the entries in the `[patch]` section of + /// the manifest. + /// + /// Here the `deps` will be resolved to a precise version and stored + /// internally for future calls to `query` below. It's expected that `deps` + /// have had `lock_to` call already, if applicable. (e.g., if a lock file was + /// already present). + /// + /// Note that the patch list specified here *will not* be available to + /// `query` until `lock_patches` is called below, which should be called + /// once all patches have been added. + pub fn patch(&mut self, url: &Url, deps: &[Dependency]) -> CargoResult<()> { + // First up we need to actually resolve each `deps` specification to + // precisely one summary. We're not using the `query` method below as it + // internally uses maps we're building up as part of this method + // (`patches_available` and `patches). Instead we're going straight to + // the source to load information from it. + // + // Remember that each dependency listed in `[patch]` has to resolve to + // precisely one package, so that's why we're just creating a flat list + // of summaries which should be the same length as `deps` above. + let unlocked_summaries = deps + .iter() + .map(|dep| { + debug!( + "registring a patch for `{}` with `{}`", + url, + dep.package_name() + ); + + // Go straight to the source for resolving `dep`. Load it as we + // normally would and then ask it directly for the list of summaries + // corresponding to this `dep`. + self.ensure_loaded(dep.source_id(), Kind::Normal) + .chain_err(|| { + failure::format_err!( + "failed to load source for a dependency \ + on `{}`", + dep.package_name() + ) + })?; + + let mut summaries = self + .sources + .get_mut(dep.source_id()) + .expect("loaded source not present") + .query_vec(dep)? 
+ .into_iter(); + + let summary = match summaries.next() { + Some(summary) => summary, + None => failure::bail!( + "patch for `{}` in `{}` did not resolve to any crates. If this is \ + unexpected, you may wish to consult: \ + https://github.com/rust-lang/cargo/issues/4678", + dep.package_name(), + url + ), + }; + if summaries.next().is_some() { + failure::bail!( + "patch for `{}` in `{}` resolved to more than one candidate", + dep.package_name(), + url + ) + } + if summary.package_id().source_id().url() == url { + failure::bail!( + "patch for `{}` in `{}` points to the same source, but \ + patches must point to different sources", + dep.package_name(), + url + ); + } + Ok(summary) + }) + .collect::>>() + .chain_err(|| failure::format_err!("failed to resolve patches for `{}`", url))?; + + // Note that we do not use `lock` here to lock summaries! That step + // happens later once `lock_patches` is invoked. In the meantime though + // we want to fill in the `patches_available` map (later used in the + // `lock` method) and otherwise store the unlocked summaries in + // `patches` to get locked in a future call to `lock_patches`. + let ids = unlocked_summaries.iter().map(|s| s.package_id()).collect(); + self.patches_available.insert(url.clone(), ids); + self.patches.insert(url.clone(), unlocked_summaries); + + Ok(()) + } + + /// Lock all patch summaries added via `patch`, making them available to + /// resolution via `query`. + /// + /// This function will internally `lock` each summary added via `patch` + /// above now that the full set of `patch` packages are known. This'll allow + /// us to correctly resolve overridden dependencies between patches + /// hopefully! + pub fn lock_patches(&mut self) { + assert!(!self.patches_locked); + for summaries in self.patches.values_mut() { + for summary in summaries { + *summary = lock(&self.locked, &self.patches_available, summary.clone()); + } + } + self.patches_locked = true; + } + + pub fn patches(&self) -> &HashMap> { + &self.patches + } + + fn load(&mut self, source_id: SourceId, kind: Kind) -> CargoResult<()> { + (|| { + debug!("loading source {}", source_id); + let source = self.source_config.load(source_id, &self.yanked_whitelist)?; + assert_eq!(source.source_id(), source_id); + + if kind == Kind::Override { + self.overrides.push(source_id); + } + self.add_source(source, kind); + + // Ensure the source has fetched all necessary remote data. + let _p = profile::start(format!("updating: {}", source_id)); + self.sources.get_mut(source_id).unwrap().update() + })() + .chain_err(|| failure::format_err!("Unable to update {}", source_id))?; + Ok(()) + } + + fn query_overrides(&mut self, dep: &Dependency) -> CargoResult> { + for &s in self.overrides.iter() { + let src = self.sources.get_mut(s).unwrap(); + let dep = Dependency::new_override(&*dep.package_name(), s); + let mut results = src.query_vec(&dep)?; + if !results.is_empty() { + return Ok(Some(results.remove(0))); + } + } + Ok(None) + } + + /// This function is used to transform a summary to another locked summary + /// if possible. This is where the concept of a lock file comes into play. + /// + /// If a summary points at a package ID which was previously locked, then we + /// override the summary's ID itself, as well as all dependencies, to be + /// rewritten to the locked versions. 
This will transform the summary's + /// source to a precise source (listed in the locked version) as well as + /// transforming all of the dependencies from range requirements on + /// imprecise sources to exact requirements on precise sources. + /// + /// If a summary does not point at a package ID which was previously locked, + /// or if any dependencies were added and don't have a previously listed + /// version, we still want to avoid updating as many dependencies as + /// possible to keep the graph stable. In this case we map all of the + /// summary's dependencies to be rewritten to a locked version wherever + /// possible. If we're unable to map a dependency though, we just pass it on + /// through. + pub fn lock(&self, summary: Summary) -> Summary { + assert!(self.patches_locked); + lock(&self.locked, &self.patches_available, summary) + } + + fn warn_bad_override( + &self, + override_summary: &Summary, + real_summary: &Summary, + ) -> CargoResult<()> { + let mut real_deps = real_summary.dependencies().iter().collect::>(); + + let boilerplate = "\ +This is currently allowed but is known to produce buggy behavior with spurious +recompiles and changes to the crate graph. Path overrides unfortunately were +never intended to support this feature, so for now this message is just a +warning. In the future, however, this message will become a hard error. + +To change the dependency graph via an override it's recommended to use the +`[replace]` feature of Cargo instead of the path override feature. This is +documented online at the url below for more information. + +https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#overriding-dependencies +"; + + for dep in override_summary.dependencies() { + if let Some(i) = real_deps.iter().position(|d| dep == *d) { + real_deps.remove(i); + continue; + } + let msg = format!( + "\ + path override for crate `{}` has altered the original list of\n\ + dependencies; the dependency on `{}` was either added or\n\ + modified to not match the previously resolved version\n\n\ + {}", + override_summary.package_id().name(), + dep.package_name(), + boilerplate + ); + self.source_config.config().shell().warn(&msg)?; + return Ok(()); + } + + if let Some(dep) = real_deps.get(0) { + let msg = format!( + "\ + path override for crate `{}` has altered the original list of + dependencies; the dependency on `{}` was removed\n\n + {}", + override_summary.package_id().name(), + dep.package_name(), + boilerplate + ); + self.source_config.config().shell().warn(&msg)?; + return Ok(()); + } + + Ok(()) + } +} + +impl<'cfg> Registry for PackageRegistry<'cfg> { + fn query( + &mut self, + dep: &Dependency, + f: &mut dyn FnMut(Summary), + fuzzy: bool, + ) -> CargoResult<()> { + assert!(self.patches_locked); + let (override_summary, n, to_warn) = { + // Look for an override and get ready to query the real source. + let override_summary = self.query_overrides(dep)?; + + // Next up on our list of candidates is to check the `[patch]` + // section of the manifest. Here we look through all patches + // relevant to the source that `dep` points to, and then we match + // name/version. Note that we don't use `dep.matches(..)` because + // the patches, by definition, come from a different source. + // This means that `dep.matches(..)` will always return false, when + // what we really care about is the name/version match. 
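The "locking" transformation described in the comment on `lock` above is easiest to see with the `semver` crate this file already uses: once a dependency matches a previously locked version, its range requirement collapses to an exact one. A minimal sketch under that assumption (`lock_req` is a hypothetical helper for illustration, not Cargo's API):

use semver::{Version, VersionReq};

// If the locked version still satisfies the range, pin the requirement
// to exactly that version; otherwise leave the range untouched so the
// dependency can be re-resolved.
fn lock_req(req: &VersionReq, locked: &Version) -> VersionReq {
    if req.matches(locked) {
        VersionReq::parse(&format!("= {}", locked)).unwrap()
    } else {
        req.clone()
    }
}

fn main() {
    let req = VersionReq::parse("^1.2").unwrap();
    let locked = Version::parse("1.2.7").unwrap();
    let exact = lock_req(&req, &locked);
    assert!(exact.matches(&locked));
    assert!(!exact.matches(&Version::parse("1.2.8").unwrap()));
}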
+ let mut patches = Vec::::new(); + if let Some(extra) = self.patches.get(dep.source_id().url()) { + patches.extend( + extra + .iter() + .filter(|s| dep.matches_ignoring_source(s.package_id())) + .cloned(), + ); + } + + // A crucial feature of the `[patch]` feature is that we *don't* + // query the actual registry if we have a "locked" dependency. A + // locked dep basically just means a version constraint of `=a.b.c`, + // and because patches take priority over the actual source then if + // we have a candidate we're done. + if patches.len() == 1 && dep.is_locked() { + let patch = patches.remove(0); + match override_summary { + Some(summary) => (summary, 1, Some(patch)), + None => { + f(patch); + return Ok(()); + } + } + } else { + if !patches.is_empty() { + debug!( + "found {} patches with an unlocked dep on `{}` at {} \ + with `{}`, \ + looking at sources", + patches.len(), + dep.package_name(), + dep.source_id(), + dep.version_req() + ); + } + + // Ensure the requested source_id is loaded + self.ensure_loaded(dep.source_id(), Kind::Normal) + .chain_err(|| { + failure::format_err!( + "failed to load source for a dependency \ + on `{}`", + dep.package_name() + ) + })?; + + let source = self.sources.get_mut(dep.source_id()); + match (override_summary, source) { + (Some(_), None) => failure::bail!("override found but no real ones"), + (None, None) => return Ok(()), + + // If we don't have an override then we just ship + // everything upstairs after locking the summary + (None, Some(source)) => { + for patch in patches.iter() { + f(patch.clone()); + } + + // Our sources shouldn't ever come back to us with two + // summaries that have the same version. We could, + // however, have an `[patch]` section which is in use + // to override a version in the registry. This means + // that if our `summary` in this loop has the same + // version as something in `patches` that we've + // already selected, then we skip this `summary`. + let locked = &self.locked; + let all_patches = &self.patches_available; + let callback = &mut |summary: Summary| { + for patch in patches.iter() { + let patch = patch.package_id().version(); + if summary.package_id().version() == patch { + return; + } + } + f(lock(locked, all_patches, summary)) + }; + return if fuzzy { + source.fuzzy_query(dep, callback) + } else { + source.query(dep, callback) + }; + } + + // If we have an override summary then we query the source + // to sanity check its results. We don't actually use any of + // the summaries it gives us though. 
+ (Some(override_summary), Some(source)) => { + if !patches.is_empty() { + failure::bail!("found patches and a path override") + } + let mut n = 0; + let mut to_warn = None; + { + let callback = &mut |summary| { + n += 1; + to_warn = Some(summary); + }; + if fuzzy { + source.fuzzy_query(dep, callback)?; + } else { + source.query(dep, callback)?; + } + } + (override_summary, n, to_warn) + } + } + } + }; + + if n > 1 { + failure::bail!("found an override with a non-locked list"); + } else if let Some(summary) = to_warn { + self.warn_bad_override(&override_summary, &summary)?; + } + f(self.lock(override_summary)); + Ok(()) + } + + fn describe_source(&self, id: SourceId) -> String { + match self.sources.get(id) { + Some(src) => src.describe(), + None => id.to_string(), + } + } + + fn is_replaced(&self, id: SourceId) -> bool { + match self.sources.get(id) { + Some(src) => src.is_replaced(), + None => false, + } + } +} + +fn lock(locked: &LockedMap, patches: &HashMap>, summary: Summary) -> Summary { + let pair = locked + .get(&summary.source_id()) + .and_then(|map| map.get(&*summary.name())) + .and_then(|vec| vec.iter().find(|&&(id, _)| id == summary.package_id())); + + trace!("locking summary of {}", summary.package_id()); + + // Lock the summary's ID if possible + let summary = match pair { + Some(&(ref precise, _)) => summary.override_id(precise.clone()), + None => summary, + }; + summary.map_dependencies(|dep| { + trace!( + "\t{}/{}/{}", + dep.package_name(), + dep.version_req(), + dep.source_id() + ); + + // If we've got a known set of overrides for this summary, then + // one of a few cases can arise: + // + // 1. We have a lock entry for this dependency from the same + // source as it's listed as coming from. In this case we make + // sure to lock to precisely the given package ID. + // + // 2. We have a lock entry for this dependency, but it's from a + // different source than what's listed, or the version + // requirement has changed. In this case we must discard the + // locked version because the dependency needs to be + // re-resolved. + // + // 3. We don't have a lock entry for this dependency, in which + // case it was likely an optional dependency which wasn't + // included previously so we just pass it through anyway. + // + // Cases 1/2 are handled by `matches_id` and case 3 is handled by + // falling through to the logic below. + if let Some(&(_, ref locked_deps)) = pair { + let locked = locked_deps.iter().find(|&&id| dep.matches_id(id)); + if let Some(&locked) = locked { + trace!("\tfirst hit on {}", locked); + let mut dep = dep; + dep.lock_to(locked); + return dep; + } + } + + // If this dependency did not have a locked version, then we query + // all known locked packages to see if they match this dependency. + // If anything does then we lock it to that and move on. + let v = locked + .get(&dep.source_id()) + .and_then(|map| map.get(&*dep.package_name())) + .and_then(|vec| vec.iter().find(|&&(id, _)| dep.matches_id(id))); + if let Some(&(id, _)) = v { + trace!("\tsecond hit on {}", id); + let mut dep = dep; + dep.lock_to(id); + return dep; + } + + // Finally we check to see if any registered patches correspond to + // this dependency. 
+        let v = patches.get(dep.source_id().url()).map(|vec| {
+            let dep2 = dep.clone();
+            let mut iter = vec
+                .iter()
+                .filter(move |&&p| dep2.matches_ignoring_source(p));
+            (iter.next(), iter)
+        });
+        if let Some((Some(patch_id), mut remaining)) = v {
+            assert!(remaining.next().is_none());
+            let patch_source = patch_id.source_id();
+            let patch_locked = locked
+                .get(&patch_source)
+                .and_then(|m| m.get(&*patch_id.name()))
+                .map(|list| list.iter().any(|&(ref id, _)| id == patch_id))
+                .unwrap_or(false);
+
+            if patch_locked {
+                trace!("\tthird hit on {}", patch_id);
+                let req = VersionReq::exact(patch_id.version());
+                let mut dep = dep;
+                dep.set_version_req(req);
+                return dep;
+            }
+        }
+
+        trace!("\tnope, unlocked");
+        dep
+    })
+}
diff --git a/src/cargo/core/resolver/conflict_cache.rs b/src/cargo/core/resolver/conflict_cache.rs
new file mode 100644
index 000000000..6e14dade1
--- /dev/null
+++ b/src/cargo/core/resolver/conflict_cache.rs
@@ -0,0 +1,198 @@
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+use log::trace;
+
+use super::types::ConflictReason;
+use crate::core::resolver::Context;
+use crate::core::{Dependency, PackageId};
+
+/// This is a trie for storing a large number of sets, designed to
+/// efficiently see if any of the stored sets are a subset of a search set.
+enum ConflictStoreTrie {
+    /// One of the stored sets.
+    Leaf(BTreeMap<PackageId, ConflictReason>),
+    /// A map from an element to a subtrie where
+    /// all the sets in the subtrie contain that element.
+    Node(BTreeMap<PackageId, ConflictStoreTrie>),
+}
+
+impl ConflictStoreTrie {
+    /// Finds a stored set of conflicts, if any, that is fully activated in
+    /// `cx` and contains `must_contain` (when one is specified).
+    fn find_conflicting(
+        &self,
+        cx: &Context,
+        must_contain: Option<PackageId>,
+    ) -> Option<&BTreeMap<PackageId, ConflictReason>> {
+        match self {
+            ConflictStoreTrie::Leaf(c) => {
+                if must_contain.is_none() {
+                    // `is_conflicting` checks that all the elements are active,
+                    // but we have checked each one by the recursion of this function.
+                    debug_assert!(cx.is_conflicting(None, c));
+                    Some(c)
+                } else {
+                    // We did not find `must_contain`, so we need to keep looking.
+                    None
+                }
+            }
+            ConflictStoreTrie::Node(m) => {
+                for (&pid, store) in must_contain
+                    .map(|f| m.range(..=f))
+                    .unwrap_or_else(|| m.range(..))
+                {
+                    // If the key is active, then we need to check all of the corresponding subtrie.
+                    if cx.is_active(pid) {
+                        if let Some(o) =
+                            store.find_conflicting(cx, must_contain.filter(|&f| f != pid))
+                        {
+                            return Some(o);
+                        }
+                    }
+                    // Else, if it is not active then there is no way any of the corresponding
+                    // subtrie will be conflicting.
+                }
+                None
+            }
+        }
+    }
+
+    fn insert(
+        &mut self,
+        mut iter: impl Iterator<Item = PackageId>,
+        con: BTreeMap<PackageId, ConflictReason>,
+    ) {
+        if let Some(pid) = iter.next() {
+            if let ConflictStoreTrie::Node(p) = self {
+                p.entry(pid)
+                    .or_insert_with(|| ConflictStoreTrie::Node(BTreeMap::new()))
+                    .insert(iter, con);
+            }
+            // Else, we already have a subset of this in the `ConflictStore`.
+        } else {
+            // We are at the end of the set we are adding, there are three cases for what to do
+            // next:
+            // 1. `self` is an empty dummy `Node` inserted by `or_insert_with`,
+            //    in which case we should replace it with `Leaf(con)`.
+            // 2. `self` is a `Node` because we previously inserted a superset of
+            //    the thing we are working on (I don't know if this happens in practice),
+            //    but the subset that we are working on will
+            //    always match any time the larger set would have,
+            //    in which case we can replace it with `Leaf(con)`.
+            // 3. `self` is a `Leaf` that is in the same spot in the structure as
+            //    the thing we are working on. So it is equivalent.
+            //    We can replace it with `Leaf(con)`.
+            if cfg!(debug_assertions) {
+                if let ConflictStoreTrie::Leaf(c) = self {
+                    let a: Vec<_> = con.keys().collect();
+                    let b: Vec<_> = c.keys().collect();
+                    assert_eq!(a, b);
+                }
+            }
+            *self = ConflictStoreTrie::Leaf(con)
+        }
+    }
+}
+
+pub(super) struct ConflictCache {
+    // `con_from_dep` is a cache of the reasons for each time we
+    // backtrack. For example after several backtracks we may have:
+    //
+    //  con_from_dep[`foo = "^1.0.2"`] = map!{
+    //      `foo=1.0.1`: map!{`foo=1.0.1`: Semver},
+    //      `foo=1.0.0`: map!{`foo=1.0.0`: Semver},
+    //  };
+    //
+    // This can be read as "we cannot find a candidate for dep `foo = "^1.0.2"`
+    // if either `foo=1.0.1` OR `foo=1.0.0` are activated".
+    //
+    // Another example after several backtracks we may have:
+    //
+    //  con_from_dep[`foo = ">=0.8.2, <=0.9.3"`] = map!{
+    //      `foo=0.8.1`: map!{
+    //          `foo=0.9.4`: map!{`foo=0.8.1`: Semver, `foo=0.9.4`: Semver},
+    //      }
+    //  };
+    //
+    // This can be read as "we cannot find a candidate for dep `foo = ">=0.8.2,
+    // <=0.9.3"` if both `foo=0.8.1` AND `foo=0.9.4` are activated".
+    //
+    // This is used to make sure we don't queue work we know will fail. See the
+    // discussion in https://github.com/rust-lang/cargo/pull/5168 for why this
+    // is so important. The nested HashMaps act as a kind of btree that lets us
+    // look up which entries are still active without
+    // linearly scanning through the full list.
+    //
+    // Also, as a final note, this map is **not** ever removed from: it remains
+    // a global cache which we never delete from. Any entry in this map is
+    // unconditionally true regardless of our resolution history of how we got
+    // here.
    con_from_dep: HashMap<Dependency, ConflictStoreTrie>,
+    // `dep_from_pid` is an inverse-index of `con_from_dep`.
+    // For every `PackageId` this lists the `Dependency`s that mention it.
+    dep_from_pid: HashMap<PackageId, HashSet<Dependency>>,
+}
+
+impl ConflictCache {
+    pub fn new() -> ConflictCache {
+        ConflictCache {
+            con_from_dep: HashMap::new(),
+            dep_from_pid: HashMap::new(),
+        }
+    }
+
+    /// Finds a stored set of conflicts, if any, that is fully activated in
+    /// `cx` and contains `must_contain` (when one is specified).
+    pub fn find_conflicting(
+        &self,
+        cx: &Context,
+        dep: &Dependency,
+        must_contain: Option<PackageId>,
+    ) -> Option<&BTreeMap<PackageId, ConflictReason>> {
+        let out = self
+            .con_from_dep
+            .get(dep)?
+            .find_conflicting(cx, must_contain);
+        if cfg!(debug_assertions) {
+            if let Some(f) = must_contain {
+                if let Some(c) = &out {
+                    assert!(c.contains_key(&f));
+                }
+            }
+        }
+        out
+    }
+
+    pub fn conflicting(
+        &self,
+        cx: &Context,
+        dep: &Dependency,
+    ) -> Option<&BTreeMap<PackageId, ConflictReason>> {
+        self.find_conflicting(cx, dep, None)
+    }
+
+    /// Adds to the cache a conflict of the form:
+    /// `dep` is known to be unresolvable if
+    /// all the `PackageId` entries are activated.
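The `insert` method follows next. As a side illustration, the subset-trie lookup documented above can be sketched standalone, with integers in place of `PackageId`s and plain sets in place of conflict maps (everything here is hypothetical scaffolding, not the real types):

use std::collections::{BTreeMap, BTreeSet};

// Store sets of ids; `find_subset` returns a stored set fully contained
// in `active`, walking only branches whose key element is active.
enum Trie {
    Leaf(BTreeSet<u32>),
    Node(BTreeMap<u32, Trie>),
}

impl Trie {
    fn insert(&mut self, mut it: impl Iterator<Item = u32>, set: BTreeSet<u32>) {
        match it.next() {
            None => *self = Trie::Leaf(set),
            Some(id) => {
                if let Trie::Node(m) = self {
                    m.entry(id)
                        .or_insert_with(|| Trie::Node(BTreeMap::new()))
                        .insert(it, set);
                }
            }
        }
    }

    fn find_subset(&self, active: &BTreeSet<u32>) -> Option<&BTreeSet<u32>> {
        match self {
            Trie::Leaf(s) => Some(s),
            Trie::Node(m) => m
                .iter()
                .filter(|(id, _)| active.contains(id)) // prune inactive branches
                .find_map(|(_, sub)| sub.find_subset(active)),
        }
    }
}

fn main() {
    let mut t = Trie::Node(BTreeMap::new());
    let con: BTreeSet<u32> = [1, 3].iter().cloned().collect();
    t.insert(con.clone().into_iter(), con);
    let active: BTreeSet<u32> = [1, 2, 3].iter().cloned().collect();
    assert!(t.find_subset(&active).is_some());
}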
+    pub fn insert(&mut self, dep: &Dependency, con: &BTreeMap<PackageId, ConflictReason>) {
+        self.con_from_dep
+            .entry(dep.clone())
+            .or_insert_with(|| ConflictStoreTrie::Node(BTreeMap::new()))
+            .insert(con.keys().cloned(), con.clone());
+
+        trace!(
+            "{} = \"{}\" adding a skip {:?}",
+            dep.package_name(),
+            dep.version_req(),
+            con
+        );
+
+        for c in con.keys() {
+            self.dep_from_pid
+                .entry(c.clone())
+                .or_insert_with(HashSet::new)
+                .insert(dep.clone());
+        }
+    }
+
+    pub fn dependencies_conflicting_with(&self, pid: PackageId) -> Option<&HashSet<Dependency>> {
+        self.dep_from_pid.get(&pid)
+    }
+}
diff --git a/src/cargo/core/resolver/context.rs b/src/cargo/core/resolver/context.rs
new file mode 100644
index 000000000..eb13dfa08
--- /dev/null
+++ b/src/cargo/core/resolver/context.rs
@@ -0,0 +1,419 @@
+use std::collections::{BTreeMap, HashMap, HashSet};
+use std::rc::Rc;
+
+// "ensure" seems to require "bail" be in scope (macro hygiene issue?).
+#[allow(unused_imports)]
+use failure::{bail, ensure};
+use log::debug;
+
+use crate::core::interning::InternedString;
+use crate::core::{Dependency, FeatureValue, PackageId, SourceId, Summary};
+use crate::util::CargoResult;
+use crate::util::Graph;
+
+use super::errors::ActivateResult;
+use super::types::{ConflictReason, DepInfo, GraphNode, Method, RcList, RegistryQueryer};
+
+pub use super::encode::{EncodableDependency, EncodablePackageId, EncodableResolve};
+pub use super::encode::{Metadata, WorkspaceResolve};
+pub use super::resolve::Resolve;
+
+// A `Context` is basically a bunch of local resolution information which is
+// kept around for all `BacktrackFrame` instances. As a result, this runs the
+// risk of being cloned *a lot* so we want to make this as cheap to clone as
+// possible.
+#[derive(Clone)]
+pub struct Context {
+    pub activations: Activations,
+    pub resolve_features: im_rc::HashMap<PackageId, Rc<HashSet<InternedString>>>,
+    pub links: im_rc::HashMap<InternedString, PackageId>,
+
+    // These are two cheaply-cloneable lists (O(1) clone) which are effectively
+    // hash maps but are built up as "construction lists". We'll iterate these
+    // at the very end and actually construct the map that we're making.
+    pub resolve_graph: RcList<GraphNode>,
+    pub resolve_replacements: RcList<(PackageId, PackageId)>,
+
+    // These warnings are printed after resolution.
+    pub warnings: RcList<String>,
+}
+
+pub type Activations = im_rc::HashMap<(InternedString, SourceId), Rc<Vec<Summary>>>;
+
+impl Context {
+    pub fn new() -> Context {
+        Context {
+            resolve_graph: RcList::new(),
+            resolve_features: im_rc::HashMap::new(),
+            links: im_rc::HashMap::new(),
+            resolve_replacements: RcList::new(),
+            activations: im_rc::HashMap::new(),
+            warnings: RcList::new(),
+        }
+    }
+
+    /// Activate this summary by inserting it into our list of known activations.
+    ///
+    /// Returns `true` if this summary with the given method is already activated.
+    pub fn flag_activated(&mut self, summary: &Summary, method: &Method<'_>) -> CargoResult<bool> {
+        let id = summary.package_id();
+        let prev = self
+            .activations
+            .entry((id.name(), id.source_id()))
+            .or_insert_with(|| Rc::new(Vec::new()));
+        if !prev.iter().any(|c| c == summary) {
+            self.resolve_graph.push(GraphNode::Add(id));
+            if let Some(link) = summary.links() {
+                ensure!(
+                    self.links.insert(link, id).is_none(),
+                    "Attempting to resolve a dependency with more than one crate with the \
+                     links={}.\nThis will not build as is.
Consider rebuilding the .lock file.", + &*link + ); + } + Rc::make_mut(prev).push(summary.clone()); + return Ok(false); + } + debug!("checking if {} is already activated", summary.package_id()); + let (features, use_default) = match *method { + Method::Everything + | Method::Required { + all_features: true, .. + } => return Ok(false), + Method::Required { + features, + uses_default_features, + .. + } => (features, uses_default_features), + }; + + let has_default_feature = summary.features().contains_key("default"); + Ok(match self.resolve_features.get(&id) { + Some(prev) => { + features.iter().all(|f| prev.contains(f)) + && (!use_default || prev.contains("default") || !has_default_feature) + } + None => features.is_empty() && (!use_default || !has_default_feature), + }) + } + + pub fn build_deps( + &mut self, + registry: &mut RegistryQueryer<'_>, + parent: Option<&Summary>, + candidate: &Summary, + method: &Method<'_>, + ) -> ActivateResult> { + // First, figure out our set of dependencies based on the requested set + // of features. This also calculates what features we're going to enable + // for our own dependencies. + let deps = self.resolve_features(parent, candidate, method)?; + + // Next, transform all dependencies into a list of possible candidates + // which can satisfy that dependency. + let mut deps = deps + .into_iter() + .map(|(dep, features)| { + let candidates = registry.query(&dep)?; + Ok((dep, candidates, Rc::new(features))) + }) + .collect::>>()?; + + // Attempt to resolve dependencies with fewer candidates before trying + // dependencies with more candidates. This way if the dependency with + // only one candidate can't be resolved we don't have to do a bunch of + // work before we figure that out. + deps.sort_by_key(|&(_, ref a, _)| a.len()); + + Ok(deps) + } + + pub fn prev_active(&self, dep: &Dependency) -> &[Summary] { + self.activations + .get(&(dep.package_name(), dep.source_id())) + .map(|v| &v[..]) + .unwrap_or(&[]) + } + + pub fn is_active(&self, id: PackageId) -> bool { + self.activations + .get(&(id.name(), id.source_id())) + .map(|v| v.iter().any(|s| s.package_id() == id)) + .unwrap_or(false) + } + + /// Checks whether all of `parent` and the keys of `conflicting activations` + /// are still active. + pub fn is_conflicting( + &self, + parent: Option, + conflicting_activations: &BTreeMap, + ) -> bool { + conflicting_activations + .keys() + .chain(parent.as_ref()) + .all(|&id| self.is_active(id)) + } + + /// Returns all dependencies and the features we want from them. + fn resolve_features<'b>( + &mut self, + parent: Option<&Summary>, + s: &'b Summary, + method: &'b Method<'_>, + ) -> ActivateResult)>> { + let dev_deps = match *method { + Method::Everything => true, + Method::Required { dev_deps, .. } => dev_deps, + }; + + // First, filter by dev-dependencies. + let deps = s.dependencies(); + let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps); + + let reqs = build_requirements(s, method)?; + let mut ret = Vec::new(); + let mut used_features = HashSet::new(); + let default_dep = (false, Vec::new()); + + // Next, collect all actually enabled dependencies and their features. + for dep in deps { + // Skip optional dependencies, but not those enabled through a + // feature + if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) { + continue; + } + // So we want this dependency. Move the features we want from + // `feature_deps` to `ret` and register ourselves as using this + // name. 
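The skip just above (an optional dependency is kept only when some requested feature enabled it) can be pictured with plain data; this `enabled` helper is a simplified, hypothetical sketch, not Cargo's real feature resolution:

struct Dep {
    name: &'static str,
    optional: bool,
}

// Keep a dependency unless it is optional and nothing requested it.
fn enabled(deps: &[Dep], requested: &[&str]) -> Vec<&'static str> {
    deps.iter()
        .filter(|d| !d.optional || requested.contains(&d.name))
        .map(|d| d.name)
        .collect()
}

fn main() {
    let deps = [
        Dep { name: "log", optional: false },
        Dep { name: "serde", optional: true },
    ];
    assert_eq!(enabled(&deps, &[]), vec!["log"]);
    assert_eq!(enabled(&deps, &["serde"]), vec!["log", "serde"]);
}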
+ let base = reqs.deps.get(&dep.name_in_toml()).unwrap_or(&default_dep); + used_features.insert(dep.name_in_toml()); + let always_required = !dep.is_optional() + && !s + .dependencies() + .iter() + .any(|d| d.is_optional() && d.name_in_toml() == dep.name_in_toml()); + if always_required && base.0 { + self.warnings.push(format!( + "Package `{}` does not have feature `{}`. It has a required dependency \ + with that name, but only optional dependencies can be used as features. \ + This is currently a warning to ease the transition, but it will become an \ + error in the future.", + s.package_id(), + dep.name_in_toml() + )); + } + let mut base = base.1.clone(); + base.extend(dep.features().iter()); + for feature in base.iter() { + if feature.contains('/') { + return Err(failure::format_err!( + "feature names may not contain slashes: `{}`", + feature + ) + .into()); + } + } + ret.push((dep.clone(), base)); + } + + // Any entries in `reqs.dep` which weren't used are bugs in that the + // package does not actually have those dependencies. We classified + // them as dependencies in the first place because there is no such + // feature, either. + let remaining = reqs + .deps + .keys() + .cloned() + .filter(|s| !used_features.contains(s)) + .collect::>(); + if !remaining.is_empty() { + let features = remaining.join(", "); + return Err(match parent { + None => failure::format_err!( + "Package `{}` does not have these features: `{}`", + s.package_id(), + features + ) + .into(), + Some(p) => (p.package_id(), ConflictReason::MissingFeatures(features)).into(), + }); + } + + // Record what list of features is active for this package. + if !reqs.used.is_empty() { + let pkgid = s.package_id(); + + let set = Rc::make_mut( + self.resolve_features + .entry(pkgid) + .or_insert_with(|| Rc::new(HashSet::new())), + ); + + for feature in reqs.used { + set.insert(feature); + } + } + + Ok(ret) + } + + pub fn resolve_replacements(&self) -> HashMap { + let mut replacements = HashMap::new(); + let mut cur = &self.resolve_replacements; + while let Some(ref node) = cur.head { + let (k, v) = node.0; + replacements.insert(k, v); + cur = &node.1; + } + replacements + } + + pub fn graph(&self) -> Graph> { + let mut graph: Graph> = Graph::new(); + let mut cur = &self.resolve_graph; + while let Some(ref node) = cur.head { + match node.0 { + GraphNode::Add(ref p) => graph.add(p.clone()), + GraphNode::Link(ref a, ref b, ref dep) => { + graph.link(a.clone(), b.clone()).push(dep.clone()); + } + } + cur = &node.1; + } + graph + } +} + +/// Takes requested features for a single package from the input `Method` and +/// recurses to find all requested features, dependencies and requested +/// dependency features in a `Requirements` object, returning it to the resolver. +fn build_requirements<'a, 'b: 'a>( + s: &'a Summary, + method: &'b Method<'_>, +) -> CargoResult> { + let mut reqs = Requirements::new(s); + + match *method { + Method::Everything + | Method::Required { + all_features: true, .. + } => { + for key in s.features().keys() { + reqs.require_feature(*key)?; + } + for dep in s.dependencies().iter().filter(|d| d.is_optional()) { + reqs.require_dependency(dep.name_in_toml()); + } + } + Method::Required { + all_features: false, + features: requested, + .. + } => { + for &f in requested.iter() { + reqs.require_value(&FeatureValue::new(f, s))?; + } + } + } + match *method { + Method::Everything + | Method::Required { + uses_default_features: true, + .. 
+ } => { + if s.features().contains_key("default") { + reqs.require_feature(InternedString::new("default"))?; + } + } + Method::Required { + uses_default_features: false, + .. + } => {} + } + Ok(reqs) +} + +struct Requirements<'a> { + summary: &'a Summary, + // The deps map is a mapping of package name to list of features enabled. + // Each package should be enabled, and each package should have the + // specified set of features enabled. The boolean indicates whether this + // package was specifically requested (rather than just requesting features + // *within* this package). + deps: HashMap)>, + // The used features set is the set of features which this local package had + // enabled, which is later used when compiling to instruct the code what + // features were enabled. + used: HashSet, + visited: HashSet, +} + +impl<'r> Requirements<'r> { + fn new(summary: &Summary) -> Requirements<'_> { + Requirements { + summary, + deps: HashMap::new(), + used: HashSet::new(), + visited: HashSet::new(), + } + } + + fn require_crate_feature(&mut self, package: InternedString, feat: InternedString) { + self.used.insert(package); + self.deps + .entry(package) + .or_insert((false, Vec::new())) + .1 + .push(feat); + } + + fn seen(&mut self, feat: InternedString) -> bool { + if self.visited.insert(feat) { + self.used.insert(feat); + false + } else { + true + } + } + + fn require_dependency(&mut self, pkg: InternedString) { + if self.seen(pkg) { + return; + } + self.deps.entry(pkg).or_insert((false, Vec::new())).0 = true; + } + + fn require_feature(&mut self, feat: InternedString) -> CargoResult<()> { + if feat.is_empty() || self.seen(feat) { + return Ok(()); + } + for fv in self + .summary + .features() + .get(feat.as_str()) + .expect("must be a valid feature") + { + match *fv { + FeatureValue::Feature(ref dep_feat) if **dep_feat == *feat => failure::bail!( + "cyclic feature dependency: feature `{}` depends on itself", + feat + ), + _ => {} + } + self.require_value(fv)?; + } + Ok(()) + } + + fn require_value<'f>(&mut self, fv: &'f FeatureValue) -> CargoResult<()> { + match fv { + FeatureValue::Feature(feat) => self.require_feature(*feat)?, + FeatureValue::Crate(dep) => self.require_dependency(*dep), + FeatureValue::CrateFeature(dep, dep_feat) => { + self.require_crate_feature(*dep, *dep_feat) + } + }; + Ok(()) + } +} diff --git a/src/cargo/core/resolver/encode.rs b/src/cargo/core/resolver/encode.rs new file mode 100644 index 000000000..6f3e59dba --- /dev/null +++ b/src/cargo/core/resolver/encode.rs @@ -0,0 +1,423 @@ +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::fmt; +use std::str::FromStr; + +use log::debug; +use serde::de; +use serde::ser; +use serde::{Deserialize, Serialize}; + +use crate::core::{Dependency, Package, PackageId, SourceId, Workspace}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{internal, Graph}; + +use super::Resolve; + +#[derive(Serialize, Deserialize, Debug)] +pub struct EncodableResolve { + package: Option>, + /// `root` is optional to allow backward compatibility. 
+ root: Option, + metadata: Option, + + #[serde(default, skip_serializing_if = "Patch::is_empty")] + patch: Patch, +} + +#[derive(Serialize, Deserialize, Debug, Default)] +struct Patch { + unused: Vec, +} + +pub type Metadata = BTreeMap; + +impl EncodableResolve { + pub fn into_resolve(self, ws: &Workspace<'_>) -> CargoResult { + let path_deps = build_path_deps(ws); + + let packages = { + let mut packages = self.package.unwrap_or_default(); + if let Some(root) = self.root { + packages.insert(0, root); + } + packages + }; + + // `PackageId`s in the lock file don't include the `source` part + // for workspace members, so we reconstruct proper IDs. + let live_pkgs = { + let mut live_pkgs = HashMap::new(); + let mut all_pkgs = HashSet::new(); + for pkg in packages.iter() { + let enc_id = EncodablePackageId { + name: pkg.name.clone(), + version: pkg.version.clone(), + source: pkg.source, + }; + + if !all_pkgs.insert(enc_id.clone()) { + failure::bail!("package `{}` is specified twice in the lockfile", pkg.name); + } + let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) { + // We failed to find a local package in the workspace. + // It must have been removed and should be ignored. + None => { + debug!("path dependency now missing {} v{}", pkg.name, pkg.version); + continue; + } + Some(&source) => PackageId::new(&pkg.name, &pkg.version, source)?, + }; + + assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none()) + } + live_pkgs + }; + + let lookup_id = |enc_id: &EncodablePackageId| -> Option { + live_pkgs.get(enc_id).map(|&(id, _)| id) + }; + + let g = { + let mut g = Graph::new(); + + for &(ref id, _) in live_pkgs.values() { + g.add(id.clone()); + } + + for &(ref id, pkg) in live_pkgs.values() { + let deps = match pkg.dependencies { + Some(ref deps) => deps, + None => continue, + }; + + for edge in deps.iter() { + if let Some(to_depend_on) = lookup_id(edge) { + g.link(id.clone(), to_depend_on); + } + } + } + g + }; + + let replacements = { + let mut replacements = HashMap::new(); + for &(ref id, pkg) in live_pkgs.values() { + if let Some(ref replace) = pkg.replace { + assert!(pkg.dependencies.is_none()); + if let Some(replace_id) = lookup_id(replace) { + replacements.insert(id.clone(), replace_id); + } + } + } + replacements + }; + + let mut metadata = self.metadata.unwrap_or_default(); + + // Parse out all package checksums. After we do this we can be in a few + // situations: + // + // * We parsed no checksums. In this situation we're dealing with an old + // lock file and we're gonna fill them all in. + // * We parsed some checksums, but not one for all packages listed. It + // could have been the case that some were listed, then an older Cargo + // client added more dependencies, and now we're going to fill in the + // missing ones. + // * There are too many checksums listed, indicative of an older Cargo + // client removing a package but not updating the checksums listed. + // + // In all of these situations they're part of normal usage, so we don't + // really worry about it. We just try to slurp up as many checksums as + // possible. 
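Concretely, each checksum is stored in the lock file's `[metadata]` table under a key of the form `checksum <name> <version> (<source>)`; the loop below strips the `"checksum "` prefix and parses the remainder as a package ID. A tiny standalone sketch of that parse (the digest value is made up for illustration):

fn main() {
    let key = "checksum serde 1.0.90 (registry+https://github.com/rust-lang/crates.io-index)";
    let prefix = "checksum ";
    assert!(key.starts_with(prefix));
    // Same shape as `EncodablePackageId::from_str` later in this file:
    // "<name> <version> (<url>)", split on at most two spaces.
    let mut parts = key[prefix.len()..].splitn(3, ' ');
    assert_eq!(parts.next(), Some("serde"));
    assert_eq!(parts.next(), Some("1.0.90"));
    assert!(parts.next().unwrap().starts_with('('));
}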
+ let mut checksums = HashMap::new(); + let prefix = "checksum "; + let mut to_remove = Vec::new(); + for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) { + to_remove.push(k.to_string()); + let k = &k[prefix.len()..]; + let enc_id: EncodablePackageId = k + .parse() + .chain_err(|| internal("invalid encoding of checksum in lockfile"))?; + let id = match lookup_id(&enc_id) { + Some(id) => id, + _ => continue, + }; + + let v = if v == "" { + None + } else { + Some(v.to_string()) + }; + checksums.insert(id, v); + } + + for k in to_remove { + metadata.remove(&k); + } + + let mut unused_patches = Vec::new(); + for pkg in self.patch.unused { + let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) { + Some(&src) => PackageId::new(&pkg.name, &pkg.version, src)?, + None => continue, + }; + unused_patches.push(id); + } + + Ok(Resolve::new( + g, + replacements, + HashMap::new(), + checksums, + metadata, + unused_patches, + )) + } +} + +fn build_path_deps(ws: &Workspace<'_>) -> HashMap { + // If a crate is **not** a path source, then we're probably in a situation + // such as `cargo install` with a lock file from a remote dependency. In + // that case we don't need to fixup any path dependencies (as they're not + // actually path dependencies any more), so we ignore them. + let members = ws + .members() + .filter(|p| p.package_id().source_id().is_path()) + .collect::>(); + + let mut ret = HashMap::new(); + let mut visited = HashSet::new(); + for member in members.iter() { + ret.insert( + member.package_id().name().to_string(), + member.package_id().source_id(), + ); + visited.insert(member.package_id().source_id()); + } + for member in members.iter() { + build_pkg(member, ws, &mut ret, &mut visited); + } + for deps in ws.root_patch().values() { + for dep in deps { + build_dep(dep, ws, &mut ret, &mut visited); + } + } + for &(_, ref dep) in ws.root_replace() { + build_dep(dep, ws, &mut ret, &mut visited); + } + + return ret; + + fn build_pkg( + pkg: &Package, + ws: &Workspace<'_>, + ret: &mut HashMap, + visited: &mut HashSet, + ) { + for dep in pkg.dependencies() { + build_dep(dep, ws, ret, visited); + } + } + + fn build_dep( + dep: &Dependency, + ws: &Workspace<'_>, + ret: &mut HashMap, + visited: &mut HashSet, + ) { + let id = dep.source_id(); + if visited.contains(&id) || !id.is_path() { + return; + } + let path = match id.url().to_file_path() { + Ok(p) => p.join("Cargo.toml"), + Err(_) => return, + }; + let pkg = match ws.load(&path) { + Ok(p) => p, + Err(_) => return, + }; + ret.insert(pkg.name().to_string(), pkg.package_id().source_id()); + visited.insert(pkg.package_id().source_id()); + build_pkg(&pkg, ws, ret, visited); + } +} + +impl Patch { + fn is_empty(&self) -> bool { + self.unused.is_empty() + } +} + +#[derive(Serialize, Deserialize, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct EncodableDependency { + name: String, + version: String, + source: Option, + dependencies: Option>, + replace: Option, +} + +#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)] +pub struct EncodablePackageId { + name: String, + version: String, + source: Option, +} + +impl fmt::Display for EncodablePackageId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} {}", self.name, self.version)?; + if let Some(ref s) = self.source { + write!(f, " ({})", s.to_url())?; + } + Ok(()) + } +} + +impl FromStr for EncodablePackageId { + type Err = failure::Error; + + fn from_str(s: &str) -> CargoResult { + let mut s = s.splitn(3, ' '); + let name = 
s.next().unwrap(); + let version = s + .next() + .ok_or_else(|| internal("invalid serialized PackageId"))?; + let source_id = match s.next() { + Some(s) => { + if s.starts_with('(') && s.ends_with(')') { + Some(SourceId::from_url(&s[1..s.len() - 1])?) + } else { + failure::bail!("invalid serialized PackageId") + } + } + None => None, + }; + + Ok(EncodablePackageId { + name: name.to_string(), + version: version.to_string(), + source: source_id, + }) + } +} + +impl ser::Serialize for EncodablePackageId { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + s.collect_str(self) + } +} + +impl<'de> de::Deserialize<'de> for EncodablePackageId { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + String::deserialize(d).and_then(|string| { + string + .parse::() + .map_err(de::Error::custom) + }) + } +} + +pub struct WorkspaceResolve<'a, 'cfg: 'a> { + pub ws: &'a Workspace<'cfg>, + pub resolve: &'a Resolve, +} + +impl<'a, 'cfg> ser::Serialize for WorkspaceResolve<'a, 'cfg> { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + let mut ids: Vec<_> = self.resolve.iter().collect(); + ids.sort(); + + let encodable = ids + .iter() + .map(|&id| encodable_resolve_node(id, self.resolve)) + .collect::>(); + + let mut metadata = self.resolve.metadata().clone(); + + for &id in ids.iter().filter(|id| !id.source_id().is_path()) { + let checksum = match self.resolve.checksums()[&id] { + Some(ref s) => &s[..], + None => "", + }; + let id = encodable_package_id(id); + metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string()); + } + + let metadata = if metadata.is_empty() { + None + } else { + Some(metadata) + }; + + let patch = Patch { + unused: self + .resolve + .unused_patches() + .iter() + .map(|id| EncodableDependency { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()), + dependencies: None, + replace: None, + }) + .collect(), + }; + EncodableResolve { + package: Some(encodable), + root: None, + metadata, + patch, + } + .serialize(s) + } +} + +fn encodable_resolve_node(id: PackageId, resolve: &Resolve) -> EncodableDependency { + let (replace, deps) = match resolve.replacement(id) { + Some(id) => (Some(encodable_package_id(id)), None), + None => { + let mut deps = resolve + .deps_not_replaced(id) + .map(encodable_package_id) + .collect::>(); + deps.sort(); + (None, Some(deps)) + } + }; + + EncodableDependency { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()), + dependencies: deps, + replace, + } +} + +pub fn encodable_package_id(id: PackageId) -> EncodablePackageId { + EncodablePackageId { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()).map(|s| s.with_precise(None)), + } +} + +fn encode_source(id: SourceId) -> Option { + if id.is_path() { + None + } else { + Some(id) + } +} diff --git a/src/cargo/core/resolver/errors.rs b/src/cargo/core/resolver/errors.rs new file mode 100644 index 000000000..20c8d8887 --- /dev/null +++ b/src/cargo/core/resolver/errors.rs @@ -0,0 +1,295 @@ +use std::collections::BTreeMap; +use std::fmt; + +use crate::core::{Dependency, PackageId, Registry, Summary}; +use crate::util::lev_distance::lev_distance; +use crate::util::Config; +use failure::{Error, Fail}; +use semver; + +use super::context::Context; +use super::types::{Candidate, ConflictReason}; + +/// Error during resolution providing a path of `PackageId`s. 
+pub struct ResolveError { + cause: Error, + package_path: Vec, +} + +impl ResolveError { + pub fn new>(cause: E, package_path: Vec) -> Self { + Self { + cause: cause.into(), + package_path, + } + } + + /// Returns a path of packages from the package whose requirements could not be resolved up to + /// the root. + pub fn package_path(&self) -> &[PackageId] { + &self.package_path + } +} + +impl Fail for ResolveError { + fn cause(&self) -> Option<&dyn Fail> { + self.cause.as_fail().cause() + } +} + +impl fmt::Debug for ResolveError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.cause.fmt(f) + } +} + +impl fmt::Display for ResolveError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.cause.fmt(f) + } +} + +pub type ActivateResult = Result; + +pub enum ActivateError { + Fatal(failure::Error), + Conflict(PackageId, ConflictReason), +} + +impl From<::failure::Error> for ActivateError { + fn from(t: ::failure::Error) -> Self { + ActivateError::Fatal(t) + } +} + +impl From<(PackageId, ConflictReason)> for ActivateError { + fn from(t: (PackageId, ConflictReason)) -> Self { + ActivateError::Conflict(t.0, t.1) + } +} + +pub(super) fn activation_error( + cx: &Context, + registry: &mut dyn Registry, + parent: &Summary, + dep: &Dependency, + conflicting_activations: &BTreeMap, + candidates: &[Candidate], + config: Option<&Config>, +) -> ResolveError { + let graph = cx.graph(); + let to_resolve_err = |err| { + ResolveError::new( + err, + graph + .path_to_top(&parent.package_id()) + .into_iter() + .cloned() + .collect(), + ) + }; + + if !candidates.is_empty() { + let mut msg = format!("failed to select a version for `{}`.", dep.package_name()); + msg.push_str("\n ... required by "); + msg.push_str(&describe_path(&graph.path_to_top(&parent.package_id()))); + + msg.push_str("\nversions that meet the requirements `"); + msg.push_str(&dep.version_req().to_string()); + msg.push_str("` are: "); + msg.push_str( + &candidates + .iter() + .map(|v| v.summary.version()) + .map(|v| v.to_string()) + .collect::>() + .join(", "), + ); + + let mut conflicting_activations: Vec<_> = conflicting_activations.iter().collect(); + conflicting_activations.sort_unstable(); + let (links_errors, mut other_errors): (Vec<_>, Vec<_>) = conflicting_activations + .drain(..) + .rev() + .partition(|&(_, r)| r.is_links()); + + for &(p, r) in links_errors.iter() { + if let ConflictReason::Links(ref link) = *r { + msg.push_str("\n\nthe package `"); + msg.push_str(&*dep.package_name()); + msg.push_str("` links to the native library `"); + msg.push_str(link); + msg.push_str("`, but it conflicts with a previous package which links to `"); + msg.push_str(link); + msg.push_str("` as well:\n"); + } + msg.push_str(&describe_path(&graph.path_to_top(p))); + } + + let (features_errors, other_errors): (Vec<_>, Vec<_>) = other_errors + .drain(..) + .partition(|&(_, r)| r.is_missing_features()); + + for &(p, r) in features_errors.iter() { + if let ConflictReason::MissingFeatures(ref features) = *r { + msg.push_str("\n\nthe package `"); + msg.push_str(&*p.name()); + msg.push_str("` depends on `"); + msg.push_str(&*dep.package_name()); + msg.push_str("`, with features: `"); + msg.push_str(features); + msg.push_str("` but `"); + msg.push_str(&*dep.package_name()); + msg.push_str("` does not have these features.\n"); + } + // p == parent so the full path is redundant. 
+        }
+
+        if !other_errors.is_empty() {
+            msg.push_str(
+                "\n\nall possible versions conflict with \
+                 previously selected packages.",
+            );
+        }
+
+        for &(p, _) in other_errors.iter() {
+            msg.push_str("\n\n  previously selected ");
+            msg.push_str(&describe_path(&graph.path_to_top(p)));
+        }
+
+        msg.push_str("\n\nfailed to select a version for `");
+        msg.push_str(&*dep.package_name());
+        msg.push_str("` which could resolve this conflict");
+
+        return to_resolve_err(failure::format_err!("{}", msg));
+    }
+
+    // We didn't actually find any candidates, so we need to
+    // give an error message that nothing was found.
+    //
+    // Maybe the user mistyped the ver_req? Like `dep="2"` when `dep="0.2"`
+    // was meant. So we re-query the registry with `dep="*"` so we can
+    // list a few versions that were actually found.
+    let all_req = semver::VersionReq::parse("*").unwrap();
+    let mut new_dep = dep.clone();
+    new_dep.set_version_req(all_req);
+    let mut candidates = match registry.query_vec(&new_dep, false) {
+        Ok(candidates) => candidates,
+        Err(e) => return to_resolve_err(e),
+    };
+    candidates.sort_unstable_by(|a, b| b.version().cmp(a.version()));
+
+    let mut msg = if !candidates.is_empty() {
+        let versions = {
+            let mut versions = candidates
+                .iter()
+                .take(3)
+                .map(|cand| cand.version().to_string())
+                .collect::<Vec<_>>();
+
+            if candidates.len() > 3 {
+                versions.push("...".into());
+            }
+
+            versions.join(", ")
+        };
+
+        let mut msg = format!(
+            "failed to select a version for the requirement `{} = \"{}\"`\n  \
+             candidate versions found which didn't match: {}\n  \
+             location searched: {}\n",
+            dep.package_name(),
+            dep.version_req(),
+            versions,
+            registry.describe_source(dep.source_id()),
+        );
+        msg.push_str("required by ");
+        msg.push_str(&describe_path(&graph.path_to_top(&parent.package_id())));
+
+        // If we have a path dependency with a locked version, then this may
+        // indicate that we updated a sub-package and forgot to run `cargo
+        // update`. In this case try to print a helpful error!
+        if dep.source_id().is_path() && dep.version_req().to_string().starts_with('=') {
+            msg.push_str(
+                "\nconsider running `cargo update` to update \
+                 a path dependency's locked version",
+            );
+        }
+
+        if registry.is_replaced(dep.source_id()) {
+            msg.push_str("\nperhaps a crate was updated and forgotten to be re-vendored?");
+        }
+
+        msg
+    } else {
+        // Maybe the user mistyped the name? Like `dep-thing` when `Dep_Thing`
+        // was meant. So we try asking the registry for a `fuzzy` search for suggestions.
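The suggestion pass that follows ranks fuzzy matches by edit distance and keeps only names within distance 4. A self-contained sketch of that ranking, with a local `lev` standing in for `util::lev_distance` (assumed behavior, not the exact implementation):

// Classic single-row Levenshtein distance.
fn lev(a: &str, b: &str) -> usize {
    let b: Vec<char> = b.chars().collect();
    let mut prev: Vec<usize> = (0..=b.len()).collect();
    for (i, ca) in a.chars().enumerate() {
        let mut cur = vec![i + 1];
        for (j, &cb) in b.iter().enumerate() {
            let cost = if ca == cb { 0 } else { 1 };
            cur.push((prev[j] + cost).min(prev[j + 1] + 1).min(cur[j] + 1));
        }
        prev = cur;
    }
    prev[b.len()]
}

fn main() {
    let mut names = vec!["serde", "serde_json", "sled"];
    names.retain(|n| lev("serd", n) < 4); // drop far-away names
    names.sort_by_key(|n| lev("serd", n)); // closest suggestion first
    assert_eq!(names.first(), Some(&"serde"));
}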
+        let mut candidates = Vec::new();
+        if let Err(e) = registry.query(&new_dep, &mut |s| candidates.push(s.name()), true) {
+            return to_resolve_err(e);
+        };
+        candidates.sort_unstable();
+        candidates.dedup();
+        let mut candidates: Vec<_> = candidates
+            .iter()
+            .map(|n| (lev_distance(&*new_dep.package_name(), &*n), n))
+            .filter(|&(d, _)| d < 4)
+            .collect();
+        candidates.sort_by_key(|o| o.0);
+        let mut msg = format!(
+            "no matching package named `{}` found\n\
+             location searched: {}\n",
+            dep.package_name(),
+            dep.source_id()
+        );
+        if !candidates.is_empty() {
+            let mut names = candidates
+                .iter()
+                .take(3)
+                .map(|c| c.1.as_str())
+                .collect::<Vec<_>>();
+
+            if candidates.len() > 3 {
+                names.push("...");
+            }
+
+            msg.push_str("perhaps you meant: ");
+            msg.push_str(&names.iter().enumerate().fold(
+                String::default(),
+                |acc, (i, el)| match i {
+                    0 => acc + el,
+                    i if names.len() - 1 == i && candidates.len() <= 3 => acc + " or " + el,
+                    _ => acc + ", " + el,
+                },
+            ));
+            msg.push_str("\n");
+        }
+        msg.push_str("required by ");
+        msg.push_str(&describe_path(&graph.path_to_top(&parent.package_id())));
+
+        msg
+    };
+
+    if let Some(config) = config {
+        if config.cli_unstable().offline {
+            msg.push_str(
+                "\nAs a reminder, you're using offline mode (-Z offline) \
+                 which can sometimes cause surprising resolution failures; \
+                 if this error is too confusing you may wish to retry \
+                 without the offline flag.",
+            );
+        }
+    }
+
+    to_resolve_err(failure::format_err!("{}", msg))
+}
+
+/// Returns a `String` representation of the dependency chain for a particular `pkgid`.
+pub(super) fn describe_path(path: &[&PackageId]) -> String {
+    use std::fmt::Write;
+    let mut dep_path_desc = format!("package `{}`", path[0]);
+    for dep in path[1..].iter() {
+        write!(dep_path_desc, "\n    ... which is depended on by `{}`", dep).unwrap();
+    }
+    dep_path_desc
+}
diff --git a/src/cargo/core/resolver/mod.rs b/src/cargo/core/resolver/mod.rs
new file mode 100644
index 000000000..ba1ef7d07
--- /dev/null
+++ b/src/cargo/core/resolver/mod.rs
@@ -0,0 +1,908 @@
+//! Resolution of the entire dependency graph for a crate.
+//!
+//! This module implements the core logic in taking the world of crates and
+//! constraints and creating a resolved graph with locked versions for all
+//! crates and their dependencies. This is separate from the registry module,
+//! which is more concerned with discovering crates from various sources; this
+//! module just uses the `Registry` trait as a source to learn about crates.
+//!
+//! Actually solving a constraint graph is an NP-hard problem. This algorithm
+//! is basically a nice heuristic to make sure we get roughly the best answer
+//! most of the time. The constraints that we're working with are:
+//!
+//! 1. Each crate can have any number of dependencies. Each dependency can
+//!    declare a version range that it is compatible with.
+//! 2. Crates can be activated with multiple versions (e.g., show up in the
+//!    dependency graph twice) so long as each pairwise instance has
+//!    semver-incompatible versions.
+//!
+//! The algorithm employed here is fairly simple: we do a DFS, activating
+//! the "newest crate" (highest version) first and then going to the next
+//! option. The heuristics we employ are:
+//!
+//! * Never try to activate a crate version which is incompatible. This means we
+//!   only try crates which will actually satisfy a dependency, and we won't ever
+//!   try to activate a crate that's semver compatible with something else
activated (as we're only allowed to have one) nor try to activate a crate
+//!   that has the same links attribute as something else activated.
+//! * Always try to activate the highest version crate first. The default
+//!   dependency in Cargo (e.g., when you write `foo = "0.1.2"`) is
+//!   semver-compatible, so selecting the highest version possible will allow us
+//!   to hopefully satisfy as many dependencies at once.
+//!
+//! Beyond that, what's implemented below is just a naive backtracking version
+//! which should in theory try all possible combinations of dependencies and
+//! versions to see if one works. The first resolution that works causes
+//! everything to bail out immediately and return success, and only if *nothing*
+//! works do we actually return an error up the stack.
+//!
+//! ## Performance
+//!
+//! Note that this is a relatively performance-critical portion of Cargo. The
+//! data that we're processing is proportional to the size of the dependency
+//! graph, which can often be quite large (e.g., take a look at Servo). To make
+//! matters worse the DFS algorithm we've implemented is inherently quite
+//! inefficient. When we add the requirement of backtracking on top it means
+//! that we're implementing something that probably shouldn't be allocating all
+//! over the place.
+
+use std::collections::{BTreeMap, HashMap, HashSet};
+use std::mem;
+use std::rc::Rc;
+use std::time::{Duration, Instant};
+
+use log::{debug, trace};
+
+use crate::core::interning::InternedString;
+use crate::core::PackageIdSpec;
+use crate::core::{Dependency, PackageId, Registry, Summary};
+use crate::util::config::Config;
+use crate::util::errors::CargoResult;
+use crate::util::profile;
+
+use self::context::{Activations, Context};
+use self::types::{Candidate, ConflictReason, DepsFrame, GraphNode};
+use self::types::{RcVecIter, RegistryQueryer, RemainingDeps, ResolverProgress};
+
+pub use self::encode::{EncodableDependency, EncodablePackageId, EncodableResolve};
+pub use self::encode::{Metadata, WorkspaceResolve};
+pub use self::errors::{ActivateError, ActivateResult, ResolveError};
+pub use self::resolve::Resolve;
+pub use self::types::Method;
+
+mod conflict_cache;
+mod context;
+mod encode;
+mod errors;
+mod resolve;
+mod types;
+
+/// Builds the list of all packages required to build the first argument.
+///
+/// * `summaries` - the list of package summaries along with how to resolve
+///   their features. This is a list of all top-level packages that are intended
+///   to be part of the lock file (resolve output). These typically are a list
+///   of all workspace members.
+///
+/// * `replacements` - this is a list of `[replace]` directives found in the
+///   root of the workspace. The list here is a `PackageIdSpec` of what to
+///   replace and a `Dependency` to replace that with. In general it's not
+///   recommended to use `[replace]` any more and use `[patch]` instead, which
+///   is supported elsewhere.
+///
+/// * `registry` - this is the source from which all package summaries are
+///   loaded. It's expected that this is extensively configured ahead of time
+///   and is idempotent with our requests to it (aka returns the same results
+///   for the same query every time). Typically this is an instance of a
+///   `PackageRegistry`.
+///
+/// * `try_to_use` - this is a list of package IDs which were previously found
+///   in the lock file. We heuristically prefer the ids listed in `try_to_use`
+///   when sorting candidates to activate, but otherwise this isn't used
+///   anywhere else.
+///
+/// * `config` - a location to print warnings and such, or `None` if no warnings
+///   should be printed
+///
+/// * `print_warnings` - whether or not to print backwards-compatibility
+///   warnings and such
+pub fn resolve(
+    summaries: &[(Summary, Method<'_>)],
+    replacements: &[(PackageIdSpec, Dependency)],
+    registry: &mut dyn Registry,
+    try_to_use: &HashSet<PackageId>,
+    config: Option<&Config>,
+    print_warnings: bool,
+) -> CargoResult<Resolve> {
+    let cx = Context::new();
+    let _p = profile::start("resolving");
+    let minimal_versions = match config {
+        Some(config) => config.cli_unstable().minimal_versions,
+        None => false,
+    };
+    let mut registry = RegistryQueryer::new(registry, replacements, try_to_use, minimal_versions);
+    let cx = activate_deps_loop(cx, &mut registry, summaries, config)?;
+
+    let mut cksums = HashMap::new();
+    for summary in cx.activations.values().flat_map(|v| v.iter()) {
+        let cksum = summary.checksum().map(|s| s.to_string());
+        cksums.insert(summary.package_id(), cksum);
+    }
+    let resolve = Resolve::new(
+        cx.graph(),
+        cx.resolve_replacements(),
+        cx.resolve_features
+            .iter()
+            .map(|(k, v)| (*k, v.iter().map(|x| x.to_string()).collect()))
+            .collect(),
+        cksums,
+        BTreeMap::new(),
+        Vec::new(),
+    );
+
+    check_cycles(&resolve, &cx.activations)?;
+    check_duplicate_pkgs_in_lockfile(&resolve)?;
+    trace!("resolved: {:?}", resolve);
+
+    // If we have a shell, emit warnings about required deps used as features.
+    if let Some(config) = config {
+        if print_warnings {
+            let mut shell = config.shell();
+            let mut warnings = &cx.warnings;
+            while let Some(ref head) = warnings.head {
+                shell.warn(&head.0)?;
+                warnings = &head.1;
+            }
+        }
+    }
+
+    Ok(resolve)
+}
+
+/// Recursively activates the dependencies for `top`, in depth-first order,
+/// backtracking across possible candidates for each dependency as necessary.
+///
+/// If all dependencies can be activated and resolved to a version in the
+/// dependency graph, `cx.resolve` is returned.
+fn activate_deps_loop(
+    mut cx: Context,
+    registry: &mut RegistryQueryer<'_>,
+    summaries: &[(Summary, Method<'_>)],
+    config: Option<&Config>,
+) -> CargoResult<Context> {
+    let mut backtrack_stack = Vec::new();
+    let mut remaining_deps = RemainingDeps::new();
+
+    // `past_conflicting_activations` is a cache of the reasons for each time we
+    // backtrack.
+    let mut past_conflicting_activations = conflict_cache::ConflictCache::new();
+
+    // Activate all the initial summaries to kick off some work.
+    for &(ref summary, ref method) in summaries {
+        debug!("initial activation: {}", summary.package_id());
+        let candidate = Candidate {
+            summary: summary.clone(),
+            replace: None,
+        };
+        let res = activate(&mut cx, registry, None, candidate, method);
+        match res {
+            Ok(Some((frame, _))) => remaining_deps.push(frame),
+            Ok(None) => (),
+            Err(ActivateError::Fatal(e)) => return Err(e),
+            Err(ActivateError::Conflict(_, _)) => panic!("bad error from activate"),
+        }
+    }
+
+    let mut printed = ResolverProgress::new();
+
+    // Main resolution loop, this is the workhorse of the resolution algorithm.
+    //
+    // You'll note that a few stacks are maintained on the side, which might
+    // seem odd when this algorithm looks like it could be implemented
+    // recursively. While correct, this is implemented iteratively to avoid
+    // blowing the stack (the recursion depth is proportional to the size of the
+    // input).
+    //
+    // The general sketch of this loop is to run until there are no dependencies
+    // left to activate, and for each dependency to attempt to activate all of
+    // its own dependencies in turn. The `backtrack_stack` is a side table of
+    // backtracking states where if we hit an error we can return to in order to
+    // attempt to continue resolving.
+    while let Some((just_here_for_the_error_messages, frame)) =
+        remaining_deps.pop_most_constrained()
+    {
+        let (mut parent, (mut cur, (mut dep, candidates, mut features))) = frame;
+
+        // If we spend a lot of time here (we shouldn't in most cases) then give
+        // a bit of a visual indicator as to what we're doing.
+        printed.shell_status(config)?;
+
+        trace!(
+            "{}[{}]>{} {} candidates",
+            parent.name(),
+            cur,
+            dep.package_name(),
+            candidates.len()
+        );
+        trace!(
+            "{}[{}]>{} {} prev activations",
+            parent.name(),
+            cur,
+            dep.package_name(),
+            cx.prev_active(&dep).len()
+        );
+
+        let just_here_for_the_error_messages = just_here_for_the_error_messages
+            && past_conflicting_activations
+                .conflicting(&cx, &dep)
+                .is_some();
+
+        let mut remaining_candidates = RemainingCandidates::new(&candidates);
+
+        // `conflicting_activations` stores all the reasons we were unable to
+        // activate candidates. One of these reasons will have to go away for
+        // backtracking to find a place to restart. It is also the list of
+        // things to explain in the error message if we fail to resolve.
+        //
+        // This is a map of package ID to a reason why that package caused a
+        // conflict for us.
+        let mut conflicting_activations = BTreeMap::new();
+
+        // When backtracking we don't fully update `conflicting_activations`,
+        // especially for the cases where we didn't make a backtrack frame in the
+        // first place. This `backtracked` var stores whether we are continuing
+        // from a restored backtrack frame so that we can skip caching
+        // `conflicting_activations` in `past_conflicting_activations`.
+        let mut backtracked = false;
+
+        loop {
+            let next = remaining_candidates.next(&mut conflicting_activations, &cx, &dep);
+
+            let (candidate, has_another) = next.ok_or(()).or_else(|_| {
+                // If we get here then our `remaining_candidates` was just
+                // exhausted, so `dep` failed to activate.
+                //
+                // It's our job here to backtrack, if possible, and find a
+                // different candidate to activate. If we can't find any
+                // candidates whatsoever then it's time to bail entirely.
+                trace!(
+                    "{}[{}]>{} -- no candidates",
+                    parent.name(),
+                    cur,
+                    dep.package_name()
+                );
+
+                // Use our list of `conflicting_activations` to add to our
+                // global list of past conflicting activations, effectively
+                // globally poisoning `dep` if `conflicting_activations` ever
+                // shows up again. We'll use the `past_conflicting_activations`
+                // below to determine if a dependency is poisoned and skip as
+                // much work as possible.
+                //
+                // If we're only here for the error messages then there's no
+                // need to try this as this dependency is already known to be
+                // bad.
+                //
+                // As we mentioned above with the `backtracked` variable, if this
+                // local is set to `true` then our `conflicting_activations` may
+                // not be right, so we can't push into our global cache.
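+                //
+                // A sketch of the cache interaction (names hypothetical): if
+                // activating `dep = log ^0.4` failed while `{ bar -> Semver }`
+                // was active, the `insert` below records that set, and a later
+                // `past_conflicting_activations.conflicting(&cx, &dep)` will
+                // return `Some(..)` whenever `bar` is active again, letting us
+                // skip the dep without re-querying its candidates.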
+                if !just_here_for_the_error_messages && !backtracked {
+                    past_conflicting_activations.insert(&dep, &conflicting_activations);
+                }
+
+                match find_candidate(
+                    &mut backtrack_stack,
+                    &parent,
+                    backtracked,
+                    &conflicting_activations,
+                ) {
+                    Some((candidate, has_another, frame)) => {
+                        // Reset all of our local variables used with the
+                        // contents of `frame` to complete our backtrack.
+                        cur = frame.cur;
+                        cx = frame.context;
+                        remaining_deps = frame.remaining_deps;
+                        remaining_candidates = frame.remaining_candidates;
+                        parent = frame.parent;
+                        dep = frame.dep;
+                        features = frame.features;
+                        conflicting_activations = frame.conflicting_activations;
+                        backtracked = true;
+                        Ok((candidate, has_another))
+                    }
+                    None => {
+                        debug!("no candidates found");
+                        Err(errors::activation_error(
+                            &cx,
+                            registry.registry,
+                            &parent,
+                            &dep,
+                            &conflicting_activations,
+                            &candidates,
+                            config,
+                        ))
+                    }
+                }
+            })?;
+
+            // If we're only here for the error messages then we know that this
+            // activation will fail one way or another. To that end if we've got
+            // more candidates we want to fast-forward to the last one as
+            // otherwise we'll just backtrack here anyway (helping us to skip
+            // some work).
+            if just_here_for_the_error_messages && !backtracked && has_another {
+                continue;
+            }
+
+            // We have a `candidate`. Create a `BacktrackFrame` so we can add it
+            // to the `backtrack_stack` later if activation succeeds.
+            //
+            // Note that if we don't actually have another candidate then there
+            // will be nothing to backtrack to so we skip construction of the
+            // frame. This is a relatively important optimization as a number of
+            // the `clone` calls below can be quite expensive, so we avoid them
+            // if we can.
+            let backtrack = if has_another {
+                Some(BacktrackFrame {
+                    cur,
+                    context: Context::clone(&cx),
+                    remaining_deps: remaining_deps.clone(),
+                    remaining_candidates: remaining_candidates.clone(),
+                    parent: Summary::clone(&parent),
+                    dep: Dependency::clone(&dep),
+                    features: Rc::clone(&features),
+                    conflicting_activations: conflicting_activations.clone(),
+                })
+            } else {
+                None
+            };
+
+            let pid = candidate.summary.package_id();
+            let method = Method::Required {
+                dev_deps: false,
+                features: &features,
+                all_features: false,
+                uses_default_features: dep.uses_default_features(),
+            };
+            trace!(
+                "{}[{}]>{} trying {}",
+                parent.name(),
+                cur,
+                dep.package_name(),
+                candidate.summary.version()
+            );
+            let res = activate(&mut cx, registry, Some((&parent, &dep)), candidate, &method);
+
+            let successfully_activated = match res {
+                // Success! We've now activated our `candidate` in our context
+                // and we're almost ready to move on. We may want to scrap this
+                // frame in the end if it looks like it's not going to end well,
+                // so figure that out here.
+                Ok(Some((mut frame, dur))) => {
+                    printed.elapsed(dur);
+
+                    // Our `frame` here is a new package with its own list of
+                    // dependencies. Do a sanity check here of all those
+                    // dependencies by cross-referencing our global
+                    // `past_conflicting_activations`. Recall that map is a
+                    // global cache which lists sets of packages where, when
+                    // activated, the dependency is unresolvable.
+                    //
+                    // If any of our frame's dependencies fit in that bucket,
+                    // aka known unresolvable, then we extend our own set of
+                    // conflicting activations with theirs. We can do this
+                    // because the set of conflicts we found implies the
+                    // dependency can't be activated which implies that we
+                    // ourselves can't be activated, so we know that they
+                    // conflict with us.
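+                    //
+                    // Concretely (hypothetical names): if sibling dep `x` is
+                    // known to fail whenever `{ a -> Semver, us -> .. }` is
+                    // active, every reason other than ourselves gets copied
+                    // into our own `conflicting_activations` below.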
+                    let mut has_past_conflicting_dep = just_here_for_the_error_messages;
+                    if !has_past_conflicting_dep {
+                        if let Some(conflicting) = frame
+                            .remaining_siblings
+                            .clone()
+                            .filter_map(|(_, (ref new_dep, _, _))| {
+                                past_conflicting_activations.conflicting(&cx, new_dep)
+                            })
+                            .next()
+                        {
+                            // If one of our deps is known unresolvable
+                            // then we will not succeed.
+                            // However, if we are part of the reason that
+                            // one of our deps conflicts then
+                            // we can make a stronger statement
+                            // because we will definitely be activated when
+                            // we try our dep.
+                            conflicting_activations.extend(
+                                conflicting
+                                    .iter()
+                                    .filter(|&(p, _)| p != &pid)
+                                    .map(|(&p, r)| (p, r.clone())),
+                            );
+
+                            has_past_conflicting_dep = true;
+                        }
+                    }
+                    // If any of `remaining_deps` are known unresolvable with
+                    // us activated, then we extend our own set of
+                    // conflicting activations with theirs, as well as with that
+                    // dep's parent. We can do this
+                    // because the set of conflicts we found implies the
+                    // dependency can't be activated, which implies that we
+                    // ourselves are incompatible with that dep, so we know that
+                    // the dep's parent conflicts with us.
+                    if !has_past_conflicting_dep {
+                        if let Some(known_related_bad_deps) =
+                            past_conflicting_activations.dependencies_conflicting_with(pid)
+                        {
+                            if let Some((other_parent, conflict)) = remaining_deps
+                                .iter()
+                                // For deps related to us.
+                                .filter(|&(_, ref other_dep)| {
+                                    known_related_bad_deps.contains(other_dep)
+                                })
+                                .filter_map(|(other_parent, other_dep)| {
+                                    past_conflicting_activations
+                                        .find_conflicting(&cx, &other_dep, Some(pid))
+                                        .map(|con| (other_parent, con))
+                                })
+                                .next()
+                            {
+                                let rel = conflict.get(&pid).unwrap().clone();
+
+                                // The conflict we found is
+                                // "other dep will not succeed if we are activated."
+                                // We want to add
+                                // "our dep will not succeed if other dep is in remaining_deps",
+                                // but that is not how the cache is set up.
+                                // So we add the less general but much faster
+                                // "our dep will not succeed if other dep's parent is activated".
+                                conflicting_activations.extend(
+                                    conflict
+                                        .iter()
+                                        .filter(|&(p, _)| p != &pid)
+                                        .map(|(&p, r)| (p, r.clone())),
+                                );
+                                conflicting_activations.insert(other_parent, rel);
+                                has_past_conflicting_dep = true;
+                            }
+                        }
+                    }
+
+                    // OK, if we're in a "known failure" state for this frame we
+                    // may want to skip it altogether. We don't want to
+                    // skip it, though, in the case that we're displaying error
+                    // messages to the user!
+                    //
+                    // Here we need to figure out whether the user will see if we
+                    // skipped this candidate (if it's known to fail, aka has a
+                    // conflicting dep and we're the last candidate). If we're
+                    // here for the error messages, we can't skip it (but we can
+                    // prune extra work). If we don't have any candidates in our
+                    // backtrack stack then we're the last line of defense, so
+                    // we'll want to present an error message for sure.
+                    let activate_for_error_message = has_past_conflicting_dep && !has_another && {
+                        just_here_for_the_error_messages || {
+                            find_candidate(
+                                &mut backtrack_stack.clone(),
+                                &parent,
+                                backtracked,
+                                &conflicting_activations,
+                            )
+                            .is_none()
+                        }
+                    };
+
+                    // If we're only here for the error messages then we know
+                    // one of our candidate deps will fail, meaning we will
+                    // fail and that none of the backtrack frames will find a
+                    // candidate that will help. Consequently let's clean up the
+                    // no longer needed backtrack frames.
+                    if activate_for_error_message {
+                        backtrack_stack.clear();
+                    }
+
+                    // If we don't know for a fact that we'll fail, or if we're
+                    // just here for the error message, then we push this frame
+                    // onto our list of to-be-resolved frames, which will
+                    // generate more work for us later on.
+                    //
+                    // Otherwise we're guaranteed to fail and were not here for
+                    // error messages, so we skip work and don't push anything
+                    // onto our stack.
+                    frame.just_for_error_messages = has_past_conflicting_dep;
+                    if !has_past_conflicting_dep || activate_for_error_message {
+                        remaining_deps.push(frame);
+                        true
+                    } else {
+                        trace!(
+                            "{}[{}]>{} skipping {} ",
+                            parent.name(),
+                            cur,
+                            dep.package_name(),
+                            pid.version()
+                        );
+                        false
+                    }
+                }
+
+                // This candidate's already activated, so there's no extra work
+                // for us to do. Let's keep going.
+                Ok(None) => true,
+
+                // We failed with a super fatal error (like a network error), so
+                // bail out as quickly as possible as we can't reliably
+                // backtrack from errors like these.
+                Err(ActivateError::Fatal(e)) => return Err(e),
+
+                // We failed due to a bland conflict, bah! Record this in our
+                // frame's list of conflicting activations as to why this
+                // candidate failed, and then move on.
+                Err(ActivateError::Conflict(id, reason)) => {
+                    conflicting_activations.insert(id, reason);
+                    false
+                }
+            };
+
+            // If we've successfully activated then save off the backtrack frame
+            // if one was created, and otherwise break out of the inner
+            // activation loop as we're ready to move on to the next dependency.
+            if successfully_activated {
+                backtrack_stack.extend(backtrack);
+                break;
+            }
+
+            // We've failed to activate this dependency, oh dear! Our call to
+            // `activate` above may have altered our `cx` local variable, so
+            // restore it back if we've got a backtrack frame.
+            //
+            // If we don't have a backtrack frame then we're just using the `cx`
+            // for error messages anyway so we can live with a little
+            // imprecision.
+            if let Some(b) = backtrack {
+                cx = b.context;
+            }
+        }
+
+        // Ok phew, that loop was a big one! If we've broken out then we've
+        // successfully activated a candidate. Our stacks are all in place that
+        // we're ready to move on to the next dependency that needs activation,
+        // so loop back to the top of the function here.
+    }
+
+    Ok(cx)
+}
+
+/// Attempts to activate the summary `candidate` in the context `cx`.
+///
+/// This function will pull dependency summaries from the registry provided, and
+/// the dependencies of the package will be determined by the `method` provided.
+/// If `candidate` was activated, this function returns the dependency frame to
+/// iterate through next.
+fn activate(
+    cx: &mut Context,
+    registry: &mut RegistryQueryer<'_>,
+    parent: Option<(&Summary, &Dependency)>,
+    candidate: Candidate,
+    method: &Method<'_>,
+) -> ActivateResult<Option<(DepsFrame, Duration)>> {
+    if let Some((parent, dep)) = parent {
+        cx.resolve_graph.push(GraphNode::Link(
+            parent.package_id(),
+            candidate.summary.package_id(),
+            dep.clone(),
+        ));
+    }
+
+    let activated = cx.flag_activated(&candidate.summary, method)?;
+
+    let candidate = match candidate.replace {
+        Some(replace) => {
+            cx.resolve_replacements
+                .push((candidate.summary.package_id(), replace.package_id()));
+            if cx.flag_activated(&replace, method)?
&& activated {
+                return Ok(None);
+            }
+            trace!(
+                "activating {} (replacing {})",
+                replace.package_id(),
+                candidate.summary.package_id()
+            );
+            replace
+        }
+        None => {
+            if activated {
+                return Ok(None);
+            }
+            trace!("activating {}", candidate.summary.package_id());
+            candidate.summary
+        }
+    };
+
+    let now = Instant::now();
+    let deps = cx.build_deps(registry, parent.map(|p| p.0), &candidate, method)?;
+    let frame = DepsFrame {
+        parent: candidate,
+        just_for_error_messages: false,
+        remaining_siblings: RcVecIter::new(Rc::new(deps)),
+    };
+    Ok(Some((frame, now.elapsed())))
+}
+
+#[derive(Clone)]
+struct BacktrackFrame {
+    cur: usize,
+    context: Context,
+    remaining_deps: RemainingDeps,
+    remaining_candidates: RemainingCandidates,
+    parent: Summary,
+    dep: Dependency,
+    features: Rc<Vec<InternedString>>,
+    conflicting_activations: BTreeMap<PackageId, ConflictReason>,
+}
+
+/// A helper "iterator" used to extract candidates within a current `Context` of
+/// a dependency graph.
+///
+/// This struct doesn't literally implement the `Iterator` trait (it requires a
+/// few more inputs) but in general acts like one. Each `RemainingCandidates` is
+/// created with a list of candidates to choose from. When attempting to iterate
+/// over the list of candidates only *valid* candidates are returned. Validity
+/// is defined within a `Context`.
+///
+/// Candidates passed to `new` may not be returned from `next` as they could be
+/// filtered out, and as they are filtered the causes will be added to `conflicting_prev_active`.
+#[derive(Clone)]
+struct RemainingCandidates {
+    remaining: RcVecIter<Candidate>,
+    // This is an inlined peekable generator.
+    has_another: Option<Candidate>,
+}
+
+impl RemainingCandidates {
+    fn new(candidates: &Rc<Vec<Candidate>>) -> RemainingCandidates {
+        RemainingCandidates {
+            remaining: RcVecIter::new(Rc::clone(candidates)),
+            has_another: None,
+        }
+    }
+
+    /// Attempts to find another candidate to check from this list.
+    ///
+    /// This method will attempt to move this iterator forward, returning a
+    /// candidate that's possible to activate. The `cx` argument is the current
+    /// context which determines validity for candidates returned, and the `dep`
+    /// is the dependency listing that we're activating for.
+    ///
+    /// If successful a `(Candidate, bool)` pair will be returned. The
+    /// `Candidate` is the candidate to attempt to activate, and the `bool` is
+    /// an indicator of whether there are remaining candidates to try or if
+    /// we've reached the end of iteration.
+    ///
+    /// If we've reached the end of the iterator here then `None` will be
+    /// returned, and `conflicting_prev_active` will contain a map of package ID
+    /// to conflict reason, where each package ID caused a candidate to be
+    /// filtered out from the original list for the reason listed.
+    fn next(
+        &mut self,
+        conflicting_prev_active: &mut BTreeMap<PackageId, ConflictReason>,
+        cx: &Context,
+        dep: &Dependency,
+    ) -> Option<(Candidate, bool)> {
+        let prev_active = cx.prev_active(dep);
+
+        for (_, b) in self.remaining.by_ref() {
+            // The `links` key in the manifest dictates that there's only one
+            // package in a dependency graph, globally, with that particular
+            // `links` key. If this candidate links to something that's already
+            // linked to by a different package then we've gotta skip this.
+            if let Some(link) = b.summary.links() {
+                if let Some(&a) = cx.links.get(&link) {
+                    if a != b.summary.package_id() {
+                        conflicting_prev_active
+                            .entry(a)
+                            .or_insert_with(|| ConflictReason::Links(link));
+                        continue;
+                    }
+                }
+            }
+
+            // Otherwise the condition for being a valid candidate relies on
+            // semver.
Cargo dictates that you can't duplicate multiple
+            // semver-compatible versions of a crate. For example we can't
+            // simultaneously activate `foo 1.0.2` and `foo 1.2.0`. We can,
+            // however, activate `1.0.2` and `2.0.0`.
+            //
+            // Here we throw out our candidate if it's *compatible*, yet not
+            // equal, to all previously activated versions.
+            if let Some(a) = prev_active
+                .iter()
+                .find(|a| compatible(a.version(), b.summary.version()))
+            {
+                if *a != b.summary {
+                    conflicting_prev_active
+                        .entry(a.package_id())
+                        .or_insert(ConflictReason::Semver);
+                    continue;
+                }
+            }
+
+            // Well if we made it this far then we've got a valid dependency. We
+            // want this iterator to be inherently "peekable" so we don't
+            // necessarily return the item just yet. Instead we stash it away to
+            // get returned later, and if we replaced something then that was
+            // actually the candidate to try first so we return that.
+            if let Some(r) = mem::replace(&mut self.has_another, Some(b)) {
+                return Some((r, true));
+            }
+        }
+
+        // Alright we've entirely exhausted our list of candidates. If we've got
+        // something stashed away return that here (also indicating that there's
+        // nothing else).
+        self.has_another.take().map(|r| (r, false))
+    }
+}
+
+// Returns whether `a` and `b` are compatible in the semver sense. This is a
+// commutative operation.
+//
+// Versions `a` and `b` are compatible if their left-most nonzero digit is the
+// same.
+fn compatible(a: &semver::Version, b: &semver::Version) -> bool {
+    if a.major != b.major {
+        return false;
+    }
+    if a.major != 0 {
+        return true;
+    }
+    if a.minor != b.minor {
+        return false;
+    }
+    if a.minor != 0 {
+        return true;
+    }
+    a.patch == b.patch
+}
+
+/// Looks through the states in `backtrack_stack` for dependencies with
+/// remaining candidates. For each one, also checks if rolling back
+/// could change the outcome of the failed resolution that caused backtracking
+/// in the first place. Namely, if we've backtracked past the parent of the
+/// failed dep, or any of the packages flagged as giving us trouble in
+/// `conflicting_activations`.
+///
+/// Read the upstream discussion for several more detailed explanations of the
+/// logic here.
+fn find_candidate(
+    backtrack_stack: &mut Vec<BacktrackFrame>,
+    parent: &Summary,
+    backtracked: bool,
+    conflicting_activations: &BTreeMap<PackageId, ConflictReason>,
+) -> Option<(Candidate, bool, BacktrackFrame)> {
+    while let Some(mut frame) = backtrack_stack.pop() {
+        let next = frame.remaining_candidates.next(
+            &mut frame.conflicting_activations,
+            &frame.context,
+            &frame.dep,
+        );
+        let (candidate, has_another) = match next {
+            Some(pair) => pair,
+            None => continue,
+        };
+        // When we're calling this method we know that `parent` failed to
+        // activate. That means that some dependency failed to get resolved for
+        // whatever reason, and all of those reasons (plus maybe some extras)
+        // are listed in `conflicting_activations`.
+        //
+        // This means that if all members of `conflicting_activations` are still
+        // active in this backup we know that we're guaranteed to not actually
+        // make any progress. As a result if we hit this condition we can
+        // completely skip this backtrack frame and move on to the next.
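+        //
+        // Illustrative example (hypothetical packages): if we failed because
+        // `openssl-sys` was already active with `links = "ssl"`, then any
+        // frame in which that same activation is still present cannot change
+        // the outcome, and `is_conflicting` lets us discard it without trying.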
+        if !backtracked
+            && frame
+                .context
+                .is_conflicting(Some(parent.package_id()), conflicting_activations)
+        {
+            trace!(
+                "{} = \"{}\" skip as not solving {}: {:?}",
+                frame.dep.package_name(),
+                frame.dep.version_req(),
+                parent.package_id(),
+                conflicting_activations
+            );
+            continue;
+        }
+
+        return Some((candidate, has_another, frame));
+    }
+    None
+}
+
+fn check_cycles(resolve: &Resolve, activations: &Activations) -> CargoResult<()> {
+    let summaries: HashMap<PackageId, &Summary> = activations
+        .values()
+        .flat_map(|v| v.iter())
+        .map(|s| (s.package_id(), s))
+        .collect();
+
+    // Sort packages to produce user friendly deterministic errors.
+    let mut all_packages: Vec<_> = resolve.iter().collect();
+    all_packages.sort_unstable();
+    let mut checked = HashSet::new();
+    for pkg in all_packages {
+        if !checked.contains(&pkg) {
+            visit(resolve, pkg, &summaries, &mut HashSet::new(), &mut checked)?
+        }
+    }
+    return Ok(());
+
+    fn visit(
+        resolve: &Resolve,
+        id: PackageId,
+        summaries: &HashMap<PackageId, &Summary>,
+        visited: &mut HashSet<PackageId>,
+        checked: &mut HashSet<PackageId>,
+    ) -> CargoResult<()> {
+        // See if we visited ourselves.
+        if !visited.insert(id) {
+            failure::bail!(
+                "cyclic package dependency: package `{}` depends on itself. Cycle:\n{}",
+                id,
+                errors::describe_path(&resolve.path_to_top(&id))
+            );
+        }
+
+        // If we've already checked this node no need to recurse again as we'll
+        // just conclude the same thing as last time, so we only execute the
+        // recursive step if we successfully insert into `checked`.
+        //
+        // Note that if we hit an intransitive dependency then we clear out the
+        // visitation list as we can't induce a cycle through transitive
+        // dependencies.
+        if checked.insert(id) {
+            let summary = summaries[&id];
+            for dep in resolve.deps_not_replaced(id) {
+                let is_transitive = summary
+                    .dependencies()
+                    .iter()
+                    .any(|d| d.matches_id(dep) && d.is_transitive());
+                let mut empty = HashSet::new();
+                let visited = if is_transitive {
+                    &mut *visited
+                } else {
+                    &mut empty
+                };
+                visit(resolve, dep, summaries, visited, checked)?;
+
+                if let Some(id) = resolve.replacement(dep) {
+                    visit(resolve, id, summaries, visited, checked)?;
+                }
+            }
+        }
+
+        // Ok, we're done, no longer visiting our node any more.
+        visited.remove(&id);
+        Ok(())
+    }
+}
+
+/// Checks that packages are unique when written to the lock file.
+///
+/// When writing package IDs to the lock file, we apply lossy encoding. In
+/// particular, we don't store paths of path dependencies. That means that
+/// *different* packages may collide in the lock file, hence this check.
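+///
+/// For example (paths hypothetical), `foo 0.1.0 (path+file:///a/foo)` and
+/// `foo 0.1.0 (path+file:///b/foo)` are different packages, but both encode
+/// to the same lock-file entry `foo 0.1.0`, so they would collide.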
+fn check_duplicate_pkgs_in_lockfile(resolve: &Resolve) -> CargoResult<()> {
+    let mut unique_pkg_ids = HashMap::new();
+    for pkg_id in resolve.iter() {
+        let encodable_pkg_id = encode::encodable_package_id(pkg_id);
+        if let Some(prev_pkg_id) = unique_pkg_ids.insert(encodable_pkg_id, pkg_id) {
+            failure::bail!(
+                "package collision in the lockfile: packages {} and {} are different, \
+                 but only one can be written to lockfile unambiguously",
+                prev_pkg_id,
+                pkg_id
+            )
+        }
+    }
+    Ok(())
+}
diff --git a/src/cargo/core/resolver/resolve.rs b/src/cargo/core/resolver/resolve.rs
new file mode 100644
index 000000000..87117af25
--- /dev/null
+++ b/src/cargo/core/resolver/resolve.rs
@@ -0,0 +1,284 @@
+use std::borrow::Borrow;
+use std::collections::{HashMap, HashSet};
+use std::fmt;
+use std::hash::Hash;
+use std::iter::FromIterator;
+
+use url::Url;
+
+use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target};
+use crate::util::errors::CargoResult;
+use crate::util::Graph;
+
+use super::encode::Metadata;
+
+/// Represents a fully-resolved package dependency graph. Each node in the graph
+/// is a package and edges represent dependencies between packages.
+///
+/// Each instance of `Resolve` also understands the full set of features used
+/// for each package.
+#[derive(PartialEq)]
+pub struct Resolve {
+    /// A graph, whose vertices are packages and edges are dependency specifications
+    /// from `Cargo.toml`. We need a `Vec<Dependency>` here because the same package
+    /// might be present in both `[dependencies]` and `[build-dependencies]`.
+    graph: Graph<PackageId, Vec<Dependency>>,
+    replacements: HashMap<PackageId, PackageId>,
+    reverse_replacements: HashMap<PackageId, PackageId>,
+    empty_features: HashSet<String>,
+    features: HashMap<PackageId, HashSet<String>>,
+    checksums: HashMap<PackageId, Option<String>>,
+    metadata: Metadata,
+    unused_patches: Vec<PackageId>,
+}
+
+impl Resolve {
+    pub fn new(
+        graph: Graph<PackageId, Vec<Dependency>>,
+        replacements: HashMap<PackageId, PackageId>,
+        features: HashMap<PackageId, HashSet<String>>,
+        checksums: HashMap<PackageId, Option<String>>,
+        metadata: Metadata,
+        unused_patches: Vec<PackageId>,
+    ) -> Resolve {
+        let reverse_replacements = replacements.iter().map(|(&p, &r)| (r, p)).collect();
+        Resolve {
+            graph,
+            replacements,
+            features,
+            checksums,
+            metadata,
+            unused_patches,
+            empty_features: HashSet::new(),
+            reverse_replacements,
+        }
+    }
+
+    /// Resolves one of the paths from the given dependent package up to
+    /// the root.
+    pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> {
+        self.graph.path_to_top(pkg)
+    }
+
+    pub fn register_used_patches(&mut self, patches: &HashMap<Url, Vec<Summary>>) {
+        for summary in patches.values().flat_map(|v| v) {
+            if self.iter().any(|id| id == summary.package_id()) {
+                continue;
+            }
+            self.unused_patches.push(summary.package_id());
+        }
+    }
+
+    pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
+        // Given a previous instance of resolve, it should be forbidden to ever
+        // have checksums which *differ*. If the same package ID has differing
+        // checksums, then something has gone wrong such as:
+        //
+        // * Something got seriously corrupted
+        // * A "mirror" isn't actually a mirror as some changes were made
+        // * A replacement source wasn't actually a replacement, some changes
+        //   were made
+        //
+        // In all of these cases, we want to report an error to indicate that
+        // something is awry. Normal execution (esp just using crates.io) should
+        // never run into this.
+        for (id, cksum) in previous.checksums.iter() {
+            if let Some(mine) = self.checksums.get(id) {
+                if mine == cksum {
+                    continue;
+                }
+
+                // If the previous checksum wasn't calculated, the current
+                // checksum is `Some`.
This may indicate that a source was + // erroneously replaced or was replaced with something that + // desires stronger checksum guarantees than can be afforded + // elsewhere. + if cksum.is_none() { + failure::bail!( + "\ +checksum for `{}` was not previously calculated, but a checksum could now \ +be calculated + +this could be indicative of a few possible situations: + + * the source `{}` did not previously support checksums, + but was replaced with one that does + * newer Cargo implementations know how to checksum this source, but this + older implementation does not + * the lock file is corrupt +", + id, + id.source_id() + ) + + // If our checksum hasn't been calculated, then it could mean + // that future Cargo figured out how to checksum something or + // more realistically we were overridden with a source that does + // not have checksums. + } else if mine.is_none() { + failure::bail!( + "\ +checksum for `{}` could not be calculated, but a checksum is listed in \ +the existing lock file + +this could be indicative of a few possible situations: + + * the source `{}` supports checksums, + but was replaced with one that doesn't + * the lock file is corrupt + +unable to verify that `{0}` is the same as when the lockfile was generated +", + id, + id.source_id() + ) + + // If the checksums aren't equal, and neither is None, then they + // must both be Some, in which case the checksum now differs. + // That's quite bad! + } else { + failure::bail!( + "\ +checksum for `{}` changed between lock files + +this could be indicative of a few possible errors: + + * the lock file is corrupt + * a replacement source in use (e.g., a mirror) returned a different checksum + * the source itself may be corrupt in one way or another + +unable to verify that `{0}` is the same as when the lockfile was generated +", + id + ); + } + } + } + + // Be sure to just copy over any unknown metadata. 
+        self.metadata = previous.metadata.clone();
+        Ok(())
+    }
+
+    pub fn contains<Q>(&self, k: &Q) -> bool
+    where
+        PackageId: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.graph.contains(k)
+    }
+
+    pub fn sort(&self) -> Vec<PackageId> {
+        self.graph.sort()
+    }
+
+    pub fn iter<'a>(&'a self) -> impl Iterator<Item = PackageId> + 'a {
+        self.graph.iter().cloned()
+    }
+
+    pub fn deps(&self, pkg: PackageId) -> impl Iterator<Item = (PackageId, &[Dependency])> {
+        self.graph
+            .edges(&pkg)
+            .map(move |(&id, deps)| (self.replacement(id).unwrap_or(id), deps.as_slice()))
+    }
+
+    pub fn deps_not_replaced<'a>(&'a self, pkg: PackageId) -> impl Iterator<Item = PackageId> + 'a {
+        self.graph.edges(&pkg).map(|(&id, _)| id)
+    }
+
+    pub fn replacement(&self, pkg: PackageId) -> Option<PackageId> {
+        self.replacements.get(&pkg).cloned()
+    }
+
+    pub fn replacements(&self) -> &HashMap<PackageId, PackageId> {
+        &self.replacements
+    }
+
+    pub fn features(&self, pkg: PackageId) -> &HashSet<String> {
+        self.features.get(&pkg).unwrap_or(&self.empty_features)
+    }
+
+    pub fn features_sorted(&self, pkg: PackageId) -> Vec<&str> {
+        let mut v = Vec::from_iter(self.features(pkg).iter().map(|s| s.as_ref()));
+        v.sort_unstable();
+        v
+    }
+
+    pub fn query(&self, spec: &str) -> CargoResult<PackageId> {
+        PackageIdSpec::query_str(spec, self.iter())
+    }
+
+    pub fn unused_patches(&self) -> &[PackageId] {
+        &self.unused_patches
+    }
+
+    pub fn checksums(&self) -> &HashMap<PackageId, Option<String>> {
+        &self.checksums
+    }
+
+    pub fn metadata(&self) -> &Metadata {
+        &self.metadata
+    }
+
+    pub fn extern_crate_name(
+        &self,
+        from: PackageId,
+        to: PackageId,
+        to_target: &Target,
+    ) -> CargoResult<String> {
+        let deps = if from == to {
+            &[]
+        } else {
+            self.dependencies_listed(from, to)
+        };
+
+        let crate_name = to_target.crate_name();
+        let mut names = deps.iter().map(|d| {
+            d.explicit_name_in_toml()
+                .map(|s| s.as_str().replace("-", "_"))
+                .unwrap_or_else(|| crate_name.clone())
+        });
+        let name = names.next().unwrap_or_else(|| crate_name.clone());
+        for n in names {
+            failure::ensure!(
+                n == name,
+                "the crate `{}` depends on crate `{}` multiple times with different names",
+                from,
+                to,
+            );
+        }
+        Ok(name)
+    }
+
+    fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &[Dependency] {
+        // We've got a dependency on `from` to `to`, but this dependency edge
+        // may be affected by [replace]. If the `to` package is listed as the
+        // target of a replacement (aka the key of a reverse replacement map)
+        // then we try to find our dependency edge through that. If that fails
+        // then we go down below assuming it's not replaced.
+        //
+        // Note that we don't treat `from` as if it's been replaced because
+        // that's where the dependency originates from, and we only replace
+        // targets of dependencies, not the originator.
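+        //
+        // Illustrative sketch: with `[replace] "bar:1.0.0" = { .. }` in
+        // effect, `to` is the replacement package, and the edge was recorded
+        // under the original `bar 1.0.0` id, which is exactly what the
+        // `reverse_replacements` lookup below recovers.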
+        if let Some(replace) = self.reverse_replacements.get(&to) {
+            if let Some(deps) = self.graph.edge(&from, replace) {
+                return deps;
+            }
+        }
+        match self.graph.edge(&from, &to) {
+            Some(ret) => ret,
+            None => panic!("no Dependency listed for `{}` => `{}`", from, to),
+        }
+    }
+}
+
+impl fmt::Debug for Resolve {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        writeln!(fmt, "graph: {:?}", self.graph)?;
+        writeln!(fmt, "\nfeatures: {{")?;
+        for (pkg, features) in &self.features {
+            writeln!(fmt, "  {}: {:?}", pkg, features)?;
+        }
+        write!(fmt, "}}")
+    }
+}
diff --git a/src/cargo/core/resolver/types.rs b/src/cargo/core/resolver/types.rs
new file mode 100644
index 000000000..cdef34480
--- /dev/null
+++ b/src/cargo/core/resolver/types.rs
@@ -0,0 +1,519 @@
+use std::cmp::Ordering;
+use std::collections::{HashMap, HashSet};
+use std::ops::Range;
+use std::rc::Rc;
+use std::time::{Duration, Instant};
+
+use log::debug;
+
+use crate::core::interning::InternedString;
+use crate::core::{Dependency, PackageId, PackageIdSpec, Registry, Summary};
+use crate::util::errors::CargoResult;
+use crate::util::Config;
+
+use im_rc;
+
+pub struct ResolverProgress {
+    ticks: u16,
+    start: Instant,
+    time_to_print: Duration,
+    printed: bool,
+    deps_time: Duration,
+    #[cfg(debug_assertions)]
+    slow_cpu_multiplier: u64,
+}
+
+impl ResolverProgress {
+    pub fn new() -> ResolverProgress {
+        ResolverProgress {
+            ticks: 0,
+            start: Instant::now(),
+            time_to_print: Duration::from_millis(500),
+            printed: false,
+            deps_time: Duration::new(0, 0),
+            // Some CI setups are much slower than the equipment used by Cargo
+            // itself: architectures that do not have a modern processor,
+            // hardware emulation, etc. In the test code we have
+            // `slow_cpu_multiplier`, but that is not accessible here.
+            #[cfg(debug_assertions)]
+            slow_cpu_multiplier: std::env::var("CARGO_TEST_SLOW_CPU_MULTIPLIER")
+                .ok()
+                .and_then(|m| m.parse().ok())
+                .unwrap_or(1),
+        }
+    }
+    pub fn shell_status(&mut self, config: Option<&Config>) -> CargoResult<()> {
+        // If we spend a lot of time here (we shouldn't in most cases) then give
+        // a bit of a visual indicator as to what we're doing. Only enable this
+        // when stderr is a tty (a human is likely to be watching) to ensure we
+        // get deterministic output otherwise when observed by tools.
+        //
+        // Also note that we hit this loop a lot, so it's fairly performance
+        // sensitive. As a result try to defer a possibly expensive operation
+        // like `Instant::now` by only checking every N iterations of this loop
+        // to amortize the cost of the current time lookup.
+        self.ticks += 1;
+        if let Some(config) = config {
+            if config.shell().is_err_tty()
+                && !self.printed
+                && self.ticks % 1000 == 0
+                && self.start.elapsed() - self.deps_time > self.time_to_print
+            {
+                self.printed = true;
+                config.shell().status("Resolving", "dependency graph...")?;
+            }
+        }
+        #[cfg(debug_assertions)]
+        {
+            // The largest test in our suite takes less than 5000 ticks
+            // with all the algorithm improvements.
+            // If any of them are removed then it takes more than I am willing to measure.
+            // So let's fail the test fast if we have been running for too long.
+            assert!(
+                self.ticks < 50_000,
+                "got to 50_000 ticks in {:?}",
+                self.start.elapsed()
+            );
+            // The largest test in our suite takes less than 30 sec
+            // with all the improvements to how fast a tick can go.
+            // If any of them are removed then it takes more than I am willing to measure.
+            // So let's fail the test fast if we have been running for too long.
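+            // For example, running the suite with
+            // CARGO_TEST_SLOW_CPU_MULTIPLIER=4 raises the budget checked
+            // below to 4 * 90 = 360 seconds.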
+            if self.ticks % 1000 == 0 {
+                assert!(
+                    self.start.elapsed() - self.deps_time
+                        < Duration::from_secs(self.slow_cpu_multiplier * 90)
+                );
+            }
+        }
+        Ok(())
+    }
+    pub fn elapsed(&mut self, dur: Duration) {
+        self.deps_time += dur;
+    }
+}
+
+pub struct RegistryQueryer<'a> {
+    pub registry: &'a mut (dyn Registry + 'a),
+    replacements: &'a [(PackageIdSpec, Dependency)],
+    try_to_use: &'a HashSet<PackageId>,
+    cache: HashMap<Dependency, Rc<Vec<Candidate>>>,
+    // If set, the list of dependency candidates will be sorted by minimal
+    // versions first. That allows `cargo update -Z minimal-versions` which will
+    // specify minimum dependency versions to be used.
+    minimal_versions: bool,
+}
+
+impl<'a> RegistryQueryer<'a> {
+    pub fn new(
+        registry: &'a mut dyn Registry,
+        replacements: &'a [(PackageIdSpec, Dependency)],
+        try_to_use: &'a HashSet<PackageId>,
+        minimal_versions: bool,
+    ) -> Self {
+        RegistryQueryer {
+            registry,
+            replacements,
+            cache: HashMap::new(),
+            try_to_use,
+            minimal_versions,
+        }
+    }
+
+    /// Queries the `registry` to return a list of candidates for `dep`.
+    ///
+    /// This method is the location where overrides are taken into account. If
+    /// any candidates are returned which match an override then the override is
+    /// applied by performing a second query for what the override should
+    /// return.
+    pub fn query(&mut self, dep: &Dependency) -> CargoResult<Rc<Vec<Candidate>>> {
+        if let Some(out) = self.cache.get(dep).cloned() {
+            return Ok(out);
+        }
+
+        let mut ret = Vec::new();
+        self.registry.query(
+            dep,
+            &mut |s| {
+                ret.push(Candidate {
+                    summary: s,
+                    replace: None,
+                });
+            },
+            false,
+        )?;
+        for candidate in ret.iter_mut() {
+            let summary = &candidate.summary;
+
+            let mut potential_matches = self
+                .replacements
+                .iter()
+                .filter(|&&(ref spec, _)| spec.matches(summary.package_id()));
+
+            let &(ref spec, ref dep) = match potential_matches.next() {
+                None => continue,
+                Some(replacement) => replacement,
+            };
+            debug!(
+                "found an override for {} {}",
+                dep.package_name(),
+                dep.version_req()
+            );
+
+            let mut summaries = self.registry.query_vec(dep, false)?.into_iter();
+            let s = summaries.next().ok_or_else(|| {
+                failure::format_err!(
+                    "no matching package for override `{}` found\n\
+                     location searched: {}\n\
+                     version required: {}",
+                    spec,
+                    dep.source_id(),
+                    dep.version_req()
+                )
+            })?;
+            let summaries = summaries.collect::<Vec<_>>();
+            if !summaries.is_empty() {
+                let bullets = summaries
+                    .iter()
+                    .map(|s| format!("  * {}", s.package_id()))
+                    .collect::<Vec<_>>();
+                failure::bail!(
+                    "the replacement specification `{}` matched \
+                     multiple packages:\n  * {}\n{}",
+                    spec,
+                    s.package_id(),
+                    bullets.join("\n")
+                );
+            }
+
+            // The dependency should be hard-coded to have the same name and an
+            // exact version requirement, so both of these assertions should
+            // never fail.
+            assert_eq!(s.version(), summary.version());
+            assert_eq!(s.name(), summary.name());
+
+            let replace = if s.source_id() == summary.source_id() {
+                debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s);
+                None
+            } else {
+                Some(s)
+            };
+            let matched_spec = spec.clone();
+
+            // Make sure no duplicates.
+            if let Some(&(ref spec, _)) = potential_matches.next() {
+                failure::bail!(
+                    "overlapping replacement specifications found:\n\n  \
+                     * {}\n  * {}\n\nboth specifications match: {}",
+                    matched_spec,
+                    spec,
+                    summary.package_id()
+                );
+            }
+
+            for dep in summary.dependencies() {
+                debug!("\t{} => {}", dep.package_name(), dep.version_req());
+            }
+
+            candidate.replace = replace;
+        }
+
+        // When we attempt versions for a package we'll want to do so in a
+        // sorted fashion to pick the "best candidates" first. Currently we try
+        // prioritized summaries (those in `try_to_use`) and failing that we
+        // list everything from the maximum version to the lowest version.
+        ret.sort_unstable_by(|a, b| {
+            let a_in_previous = self.try_to_use.contains(&a.summary.package_id());
+            let b_in_previous = self.try_to_use.contains(&b.summary.package_id());
+            let previous_cmp = a_in_previous.cmp(&b_in_previous).reverse();
+            match previous_cmp {
+                Ordering::Equal => {
+                    let cmp = a.summary.version().cmp(b.summary.version());
+                    if self.minimal_versions {
+                        // Lower version ordered first.
+                        cmp
+                    } else {
+                        // Higher version ordered first.
+                        cmp.reverse()
+                    }
+                }
+                _ => previous_cmp,
+            }
+        });
+
+        let out = Rc::new(ret);
+
+        self.cache.insert(dep.clone(), out.clone());
+
+        Ok(out)
+    }
+}
+
+#[derive(Clone, Copy)]
+pub enum Method<'a> {
+    Everything, // equivalent to Required { dev_deps: true, all_features: true, .. }
+    Required {
+        dev_deps: bool,
+        features: &'a [InternedString],
+        all_features: bool,
+        uses_default_features: bool,
+    },
+}
+
+impl<'r> Method<'r> {
+    pub fn split_features(features: &[String]) -> Vec<InternedString> {
+        features
+            .iter()
+            .flat_map(|s| s.split_whitespace())
+            .flat_map(|s| s.split(','))
+            .filter(|s| !s.is_empty())
+            .map(|s| InternedString::new(s))
+            .collect::<Vec<InternedString>>()
+    }
+}
+
+#[derive(Clone)]
+pub struct Candidate {
+    pub summary: Summary,
+    pub replace: Option<Summary>,
+}
+
+#[derive(Clone)]
+pub struct DepsFrame {
+    pub parent: Summary,
+    pub just_for_error_messages: bool,
+    pub remaining_siblings: RcVecIter<DepInfo>,
+}
+
+impl DepsFrame {
+    /// Returns the least number of candidates that any of this frame's siblings
+    /// has.
+    ///
+    /// The `remaining_siblings` array is already sorted with the smallest
+    /// number of candidates at the front, so we just return the number of
+    /// candidates in that entry.
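+    ///
+    /// For example (illustrative), if the remaining siblings have 1, 3, and 7
+    /// candidates respectively, this returns 1.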
+    fn min_candidates(&self) -> usize {
+        self.remaining_siblings
+            .peek()
+            .map(|(_, (_, candidates, _))| candidates.len())
+            .unwrap_or(0)
+    }
+
+    pub fn flatten<'a>(&'a self) -> impl Iterator<Item = (PackageId, Dependency)> + 'a {
+        self.remaining_siblings
+            .clone()
+            .map(move |(_, (d, _, _))| (self.parent.package_id(), d))
+    }
+}
+
+impl PartialEq for DepsFrame {
+    fn eq(&self, other: &DepsFrame) -> bool {
+        self.just_for_error_messages == other.just_for_error_messages
+            && self.min_candidates() == other.min_candidates()
+    }
+}
+
+impl Eq for DepsFrame {}
+
+impl PartialOrd for DepsFrame {
+    fn partial_cmp(&self, other: &DepsFrame) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for DepsFrame {
+    fn cmp(&self, other: &DepsFrame) -> Ordering {
+        self.just_for_error_messages
+            .cmp(&other.just_for_error_messages)
+            .reverse()
+            .then_with(|| self.min_candidates().cmp(&other.min_candidates()))
+    }
+}
+
+/// Note that an `OrdSet` is used for the remaining dependencies that need
+/// activation. This set is sorted by how many candidates each dependency has.
+///
+/// This helps us get through super constrained portions of the dependency
+/// graph quickly and hopefully lock down what later larger dependencies can
+/// use (those with more candidates).
+#[derive(Clone)]
+pub struct RemainingDeps {
+    /// A monotonic counter, increased for each new insertion.
+    time: u32,
+    /// The data is augmented by the insertion time.
+    /// This ensures that no two items will compare equal,
+    /// forcing the `OrdSet` to behave like a multiset.
+    data: im_rc::OrdSet<(DepsFrame, u32)>,
+}
+
+impl RemainingDeps {
+    pub fn new() -> RemainingDeps {
+        RemainingDeps {
+            time: 0,
+            data: im_rc::OrdSet::new(),
+        }
+    }
+    pub fn push(&mut self, x: DepsFrame) {
+        let insertion_time = self.time;
+        self.data.insert((x, insertion_time));
+        self.time += 1;
+    }
+    pub fn pop_most_constrained(&mut self) -> Option<(bool, (Summary, (usize, DepInfo)))> {
+        while let Some((mut deps_frame, insertion_time)) = self.data.remove_min() {
+            let just_here_for_the_error_messages = deps_frame.just_for_error_messages;
+
+            // Figure out what our next dependency to activate is, and if nothing is
+            // listed then we're entirely done with this frame (yay!) and we can
+            // move on to the next frame.
+            if let Some(sibling) = deps_frame.remaining_siblings.next() {
+                let parent = Summary::clone(&deps_frame.parent);
+                self.data.insert((deps_frame, insertion_time));
+                return Some((just_here_for_the_error_messages, (parent, sibling)));
+            }
+        }
+        None
+    }
+    pub fn iter<'a>(&'a mut self) -> impl Iterator<Item = (PackageId, Dependency)> + 'a {
+        self.data.iter().flat_map(|(other, _)| other.flatten())
+    }
+}
+
+// Information about the dependencies for a crate, a tuple of:
+//
+// (dependency info, candidates, features activated)
+pub type DepInfo = (Dependency, Rc<Vec<Candidate>>, Rc<Vec<InternedString>>);
+
+/// All possible reasons that a package might fail to activate.
+///
+/// We maintain a list of conflicts for error reporting as well as backtracking
+/// purposes. Each reason here is why candidates may be rejected or why we may
+/// fail to resolve a dependency.
+#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
+pub enum ConflictReason {
+    /// There was a semver conflict, for example we tried to activate a package
+    /// 1.0.2 but 1.1.0 was already activated (aka a compatible semver version
+    /// is already activated).
+    Semver,
+
+    /// The `links` key is being violated. For example one crate in the
+    /// dependency graph has `links = "foo"` but this crate also had that, and
+    /// we're only allowed one per dependency graph.
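+    ///
+    /// For example (hypothetical), two crates that both declare
+    /// `links = "git2"` in their manifests can never be activated together.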
+    Links(InternedString),
+
+    /// A dependency listed features that weren't actually available on the
+    /// candidate. For example we tried to activate feature `foo` but the
+    /// candidate we're activating didn't actually have the feature `foo`.
+    MissingFeatures(String),
+}
+
+impl ConflictReason {
+    pub fn is_links(&self) -> bool {
+        if let ConflictReason::Links(_) = *self {
+            return true;
+        }
+        false
+    }
+
+    pub fn is_missing_features(&self) -> bool {
+        if let ConflictReason::MissingFeatures(_) = *self {
+            return true;
+        }
+        false
+    }
+}
+
+pub struct RcVecIter<T> {
+    vec: Rc<Vec<T>>,
+    rest: Range<usize>,
+}
+
+impl<T> RcVecIter<T> {
+    pub fn new(vec: Rc<Vec<T>>) -> RcVecIter<T> {
+        RcVecIter {
+            rest: 0..vec.len(),
+            vec,
+        }
+    }
+
+    fn peek(&self) -> Option<(usize, &T)> {
+        self.rest
+            .clone()
+            .next()
+            .and_then(|i| self.vec.get(i).map(|val| (i, &*val)))
+    }
+}
+
+// Not derived to avoid `T: Clone`.
+impl<T> Clone for RcVecIter<T> {
+    fn clone(&self) -> RcVecIter<T> {
+        RcVecIter {
+            vec: self.vec.clone(),
+            rest: self.rest.clone(),
+        }
+    }
+}
+
+impl<T> Iterator for RcVecIter<T>
+where
+    T: Clone,
+{
+    type Item = (usize, T);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.rest
+            .next()
+            .and_then(|i| self.vec.get(i).map(|val| (i, val.clone())))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // `rest` is a `std::ops::Range`, which is an `ExactSizeIterator`.
+        self.rest.size_hint()
+    }
+}
+
+impl<T: Clone> ExactSizeIterator for RcVecIter<T> {}
+
+pub struct RcList<T> {
+    pub head: Option<Rc<(T, RcList<T>)>>,
+}
+
+impl<T> RcList<T> {
+    pub fn new() -> RcList<T> {
+        RcList { head: None }
+    }
+
+    pub fn push(&mut self, data: T) {
+        let node = Rc::new((
+            data,
+            RcList {
+                head: self.head.take(),
+            },
+        ));
+        self.head = Some(node);
+    }
+}
+
+// Not derived to avoid `T: Clone`.
+impl<T> Clone for RcList<T> {
+    fn clone(&self) -> RcList<T> {
+        RcList {
+            head: self.head.clone(),
+        }
+    }
+}
+
+// Avoid stack overflows on drop by turning recursion into a loop.
+impl<T> Drop for RcList<T> {
+    fn drop(&mut self) {
+        let mut cur = self.head.take();
+        while let Some(head) = cur {
+            match Rc::try_unwrap(head) {
+                Ok((_data, mut next)) => cur = next.head.take(),
+                Err(_) => break,
+            }
+        }
+    }
+}
+
+pub enum GraphNode {
+    Add(PackageId),
+    Link(PackageId, PackageId, Dependency),
+}
diff --git a/src/cargo/core/shell.rs b/src/cargo/core/shell.rs
new file mode 100644
index 000000000..d553848c5
--- /dev/null
+++ b/src/cargo/core/shell.rs
@@ -0,0 +1,467 @@
+use std::fmt;
+use std::io::prelude::*;
+
+use atty;
+use termcolor::Color::{Cyan, Green, Red, Yellow};
+use termcolor::{self, Color, ColorSpec, StandardStream, WriteColor};
+
+use crate::util::errors::CargoResult;
+
+/// The requested verbosity of output.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Verbosity {
+    Verbose,
+    Normal,
+    Quiet,
+}
+
+/// An abstraction around a `Write`able object that remembers preferences for output verbosity and
+/// color.
+pub struct Shell {
+    /// The `Write`able object, either with or without color support (represented by different enum
+    /// variants).
+    err: ShellOut,
+    /// How verbose messages should be.
+    verbosity: Verbosity,
+    /// Flag that indicates the current line needs to be cleared before
+    /// printing. Used when a progress bar is currently displayed.
+    needs_clear: bool,
+}
+
+impl fmt::Debug for Shell {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.err {
+            ShellOut::Write(_) => f
+                .debug_struct("Shell")
+                .field("verbosity", &self.verbosity)
+                .finish(),
+            ShellOut::Stream { color_choice, ..
} => f
+                .debug_struct("Shell")
+                .field("verbosity", &self.verbosity)
+                .field("color_choice", &color_choice)
+                .finish(),
+        }
+    }
+}
+
+/// A `Write`able object, either with or without color support.
+enum ShellOut {
+    /// A plain write object without color support.
+    Write(Box<dyn Write>),
+    /// Color-enabled stdio, with information on whether color should be used.
+    Stream {
+        stream: StandardStream,
+        tty: bool,
+        color_choice: ColorChoice,
+    },
+}
+
+/// Whether messages should use color output.
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub enum ColorChoice {
+    /// Force color output.
+    Always,
+    /// Force disable color output.
+    Never,
+    /// Intelligently guess whether to use color output.
+    CargoAuto,
+}
+
+impl Shell {
+    /// Creates a new shell (color choice and verbosity), defaulting to 'auto' color and verbose
+    /// output.
+    pub fn new() -> Shell {
+        Shell {
+            err: ShellOut::Stream {
+                stream: StandardStream::stderr(ColorChoice::CargoAuto.to_termcolor_color_choice()),
+                color_choice: ColorChoice::CargoAuto,
+                tty: atty::is(atty::Stream::Stderr),
+            },
+            verbosity: Verbosity::Verbose,
+            needs_clear: false,
+        }
+    }
+
+    /// Creates a shell from a plain writable object, with no color, and max verbosity.
+    pub fn from_write(out: Box<dyn Write>) -> Shell {
+        Shell {
+            err: ShellOut::Write(out),
+            verbosity: Verbosity::Verbose,
+            needs_clear: false,
+        }
+    }
+
+    /// Prints a message, where the status will have `color` color, and can be justified. The
+    /// message follows without color.
+    fn print(
+        &mut self,
+        status: &dyn fmt::Display,
+        message: Option<&dyn fmt::Display>,
+        color: Color,
+        justified: bool,
+    ) -> CargoResult<()> {
+        match self.verbosity {
+            Verbosity::Quiet => Ok(()),
+            _ => {
+                if self.needs_clear {
+                    self.err_erase_line();
+                }
+                self.err.print(status, message, color, justified)
+            }
+        }
+    }
+
+    /// Sets whether the next print should clear the current line.
+    pub fn set_needs_clear(&mut self, needs_clear: bool) {
+        self.needs_clear = needs_clear;
+    }
+
+    /// Returns `true` if the `needs_clear` flag is unset.
+    pub fn is_cleared(&self) -> bool {
+        !self.needs_clear
+    }
+
+    /// Returns the width of the terminal in spaces, if any.
+    pub fn err_width(&self) -> Option<usize> {
+        match self.err {
+            ShellOut::Stream { tty: true, .. } => imp::stderr_width(),
+            _ => None,
+        }
+    }
+
+    /// Returns `true` if stderr is a tty.
+    pub fn is_err_tty(&self) -> bool {
+        match self.err {
+            ShellOut::Stream { tty, .. } => tty,
+            _ => false,
+        }
+    }
+
+    /// Gets a reference to the underlying writer.
+    pub fn err(&mut self) -> &mut dyn Write {
+        if self.needs_clear {
+            self.err_erase_line();
+        }
+        self.err.as_write()
+    }
+
+    /// Erases from cursor to end of line.
+    pub fn err_erase_line(&mut self) {
+        if let ShellOut::Stream { tty: true, .. } = self.err {
+            imp::err_erase_line(self);
+            self.needs_clear = false;
+        }
+    }
+
+    /// Shortcut to right-align and color green a status message.
+    pub fn status<T, U>(&mut self, status: T, message: U) -> CargoResult<()>
+    where
+        T: fmt::Display,
+        U: fmt::Display,
+    {
+        self.print(&status, Some(&message), Green, true)
+    }
+
+    pub fn status_header<T>(&mut self, status: T) -> CargoResult<()>
+    where
+        T: fmt::Display,
+    {
+        self.print(&status, None, Cyan, true)
+    }
+
+    /// Shortcut to right-align a status message.
+    pub fn status_with_color<T, U>(
+        &mut self,
+        status: T,
+        message: U,
+        color: Color,
+    ) -> CargoResult<()>
+    where
+        T: fmt::Display,
+        U: fmt::Display,
+    {
+        self.print(&status, Some(&message), color, true)
+    }
+
+    /// Runs the callback only if we are in verbose mode.
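+    ///
+    /// A usage sketch (assuming a `shell: &mut Shell` is in scope):
+    /// `shell.verbose(|s| s.status("Detail", "shown only when verbose"))?;`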
+    /// Runs the callback only if we are in verbose mode.
+    pub fn verbose<F>(&mut self, mut callback: F) -> CargoResult<()>
+    where
+        F: FnMut(&mut Shell) -> CargoResult<()>,
+    {
+        match self.verbosity {
+            Verbosity::Verbose => callback(self),
+            _ => Ok(()),
+        }
+    }
+
+    /// Runs the callback if we are not in verbose mode.
+    pub fn concise<F>(&mut self, mut callback: F) -> CargoResult<()>
+    where
+        F: FnMut(&mut Shell) -> CargoResult<()>,
+    {
+        match self.verbosity {
+            Verbosity::Verbose => Ok(()),
+            _ => callback(self),
+        }
+    }
+
+    /// Prints a red 'error' message.
+    pub fn error<T: fmt::Display>(&mut self, message: T) -> CargoResult<()> {
+        self.print(&"error:", Some(&message), Red, false)
+    }
+
+    /// Prints an amber 'warning' message.
+    pub fn warn<T: fmt::Display>(&mut self, message: T) -> CargoResult<()> {
+        match self.verbosity {
+            Verbosity::Quiet => Ok(()),
+            _ => self.print(&"warning:", Some(&message), Yellow, false),
+        }
+    }
+
+    /// Updates the verbosity of the shell.
+    pub fn set_verbosity(&mut self, verbosity: Verbosity) {
+        self.verbosity = verbosity;
+    }
+
+    /// Gets the verbosity of the shell.
+    pub fn verbosity(&self) -> Verbosity {
+        self.verbosity
+    }
+
+    /// Updates the color choice (always, never, or auto) from a string.
+    pub fn set_color_choice(&mut self, color: Option<&str>) -> CargoResult<()> {
+        if let ShellOut::Stream {
+            ref mut stream,
+            ref mut color_choice,
+            ..
+        } = self.err
+        {
+            let cfg = match color {
+                Some("always") => ColorChoice::Always,
+                Some("never") => ColorChoice::Never,
+
+                Some("auto") | None => ColorChoice::CargoAuto,
+
+                Some(arg) => failure::bail!(
+                    "argument for --color must be auto, always, or \
+                     never, but found `{}`",
+                    arg
+                ),
+            };
+            *color_choice = cfg;
+            *stream = StandardStream::stderr(cfg.to_termcolor_color_choice());
+        }
+        Ok(())
+    }
+
+    /// Gets the current color choice.
+    ///
+    /// If we are not using a color stream, this will always return `Never`, even if the color
+    /// choice has been set to something else.
+    pub fn color_choice(&self) -> ColorChoice {
+        match self.err {
+            ShellOut::Stream { color_choice, .. } => color_choice,
+            ShellOut::Write(_) => ColorChoice::Never,
+        }
+    }
+
+    /// Whether the shell supports color.
+    pub fn supports_color(&self) -> bool {
+        match &self.err {
+            ShellOut::Write(_) => false,
+            ShellOut::Stream { stream, .. } => stream.supports_color(),
+        }
+    }
+
+    /// Prints a message and translates ANSI escape code into console colors.
+    pub fn print_ansi(&mut self, message: &[u8]) -> CargoResult<()> {
+        if self.needs_clear {
+            self.err_erase_line();
+        }
+        #[cfg(windows)]
+        {
+            if let ShellOut::Stream { stream, .. } = &mut self.err {
+                ::fwdansi::write_ansi(stream, message)?;
+                return Ok(());
+            }
+        }
+        self.err().write_all(message)?;
+        Ok(())
+    }
+}
+
+impl Default for Shell {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl ShellOut {
+    /// Prints out a message with a status. The status comes first, and is bold plus the given
+    /// color. The status can be justified, in which case the max width that will right align is
+    /// 12 chars.
+    fn print(
+        &mut self,
+        status: &dyn fmt::Display,
+        message: Option<&dyn fmt::Display>,
+        color: Color,
+        justified: bool,
+    ) -> CargoResult<()> {
+        match *self {
+            ShellOut::Stream { ref mut stream, .. } => {
+                stream.reset()?;
+                stream.set_color(ColorSpec::new().set_bold(true).set_fg(Some(color)))?;
+                if justified {
+                    write!(stream, "{:>12}", status)?;
+                } else {
+                    write!(stream, "{}", status)?;
+                }
+                stream.reset()?;
+                match message {
+                    Some(message) => writeln!(stream, " {}", message)?,
+                    None => write!(stream, " ")?,
+                }
+            }
+            ShellOut::Write(ref mut w) => {
+                if justified {
+                    write!(w, "{:>12}", status)?;
+                } else {
+                    write!(w, "{}", status)?;
+                }
+                match message {
+                    Some(message) => writeln!(w, " {}", message)?,
+                    None => write!(w, " ")?,
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Gets this object as a `io::Write`.
+    fn as_write(&mut self) -> &mut dyn Write {
+        match *self {
+            ShellOut::Stream { ref mut stream, .. } => stream,
+            ShellOut::Write(ref mut w) => w,
+        }
+    }
+}
+
+impl ColorChoice {
+    /// Converts our color choice to termcolor's version.
+    fn to_termcolor_color_choice(self) -> termcolor::ColorChoice {
+        match self {
+            ColorChoice::Always => termcolor::ColorChoice::Always,
+            ColorChoice::Never => termcolor::ColorChoice::Never,
+            ColorChoice::CargoAuto => {
+                if atty::is(atty::Stream::Stderr) {
+                    termcolor::ColorChoice::Auto
+                } else {
+                    termcolor::ColorChoice::Never
+                }
+            }
+        }
+    }
+}
+
+#[cfg(any(target_os = "linux", target_os = "macos"))]
+mod imp {
+    use std::mem;
+
+    use libc;
+
+    use super::Shell;
+
+    pub fn stderr_width() -> Option<usize> {
+        unsafe {
+            let mut winsize: libc::winsize = mem::zeroed();
+            if libc::ioctl(libc::STDERR_FILENO, libc::TIOCGWINSZ, &mut winsize) < 0 {
+                return None;
+            }
+            if winsize.ws_col > 0 {
+                Some(winsize.ws_col as usize)
+            } else {
+                None
+            }
+        }
+    }
+
+    pub fn err_erase_line(shell: &mut Shell) {
+        // This is the "EL - Erase in Line" sequence. It clears from the cursor
+        // to the end of line.
+        // https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences
+        let _ = shell.err.as_write().write_all(b"\x1B[K");
+    }
+}
+
+#[cfg(all(unix, not(any(target_os = "linux", target_os = "macos"))))]
+mod imp {
+    pub(super) use super::default_err_erase_line as err_erase_line;
+
+    pub fn stderr_width() -> Option<usize> {
+        None
+    }
+}
+
+#[cfg(windows)]
+mod imp {
+    use std::{cmp, mem, ptr};
+    use winapi::um::fileapi::*;
+    use winapi::um::handleapi::*;
+    use winapi::um::processenv::*;
+    use winapi::um::winbase::*;
+    use winapi::um::wincon::*;
+    use winapi::um::winnt::*;
+
+    pub(super) use super::default_err_erase_line as err_erase_line;
+
+    pub fn stderr_width() -> Option<usize> {
+        unsafe {
+            let stdout = GetStdHandle(STD_ERROR_HANDLE);
+            let mut csbi: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
+            if GetConsoleScreenBufferInfo(stdout, &mut csbi) != 0 {
+                return Some((csbi.srWindow.Right - csbi.srWindow.Left) as usize);
+            }
+
+            // On mintty/msys/cygwin based terminals, the above fails with
+            // INVALID_HANDLE_VALUE. Use an alternate method which works
+            // in that case as well.
+            let h = CreateFileA(
+                "CONOUT$\0".as_ptr() as *const CHAR,
+                GENERIC_READ | GENERIC_WRITE,
+                FILE_SHARE_READ | FILE_SHARE_WRITE,
+                ptr::null_mut(),
+                OPEN_EXISTING,
+                0,
+                ptr::null_mut(),
+            );
+            if h == INVALID_HANDLE_VALUE {
+                return None;
+            }
+
+            let mut csbi: CONSOLE_SCREEN_BUFFER_INFO = mem::zeroed();
+            let rc = GetConsoleScreenBufferInfo(h, &mut csbi);
+            CloseHandle(h);
+            if rc != 0 {
+                let width = (csbi.srWindow.Right - csbi.srWindow.Left) as usize;
+                // Unfortunately cygwin/mintty does not set the size of the
+                // backing console to match the actual window size. This
+                // always reports a size of 80 or 120 (not sure what
+                // determines that). Use a conservative max of 60 which should
+                // work in most circumstances. ConEmu does some magic to
+                // resize the console correctly, but there's no reasonable way
+                // to detect which kind of terminal we are running in, or if
+                // GetConsoleScreenBufferInfo returns accurate information.
+                return Some(cmp::min(60, width));
+            }
+            None
+        }
+    }
+}
+
+#[cfg(any(all(unix, not(any(target_os = "linux", target_os = "macos"))), windows,))]
+fn default_err_erase_line(shell: &mut Shell) {
+    if let Some(max_width) = imp::stderr_width() {
+        let blank = " ".repeat(max_width);
+        drop(write!(shell.err.as_write(), "{}\r", blank));
+    }
+}
diff --git a/src/cargo/core/source/mod.rs b/src/cargo/core/source/mod.rs
new file mode 100644
index 000000000..f19bdf311
--- /dev/null
+++ b/src/cargo/core/source/mod.rs
@@ -0,0 +1,309 @@
+use std::collections::hash_map::HashMap;
+use std::fmt;
+
+use crate::core::{Dependency, Package, PackageId, Summary};
+use crate::core::package::PackageSet;
+use crate::util::{CargoResult, Config};
+
+mod source_id;
+
+pub use self::source_id::{GitReference, SourceId};
+
+/// Something that finds and downloads remote packages based on names and versions.
+pub trait Source {
+    /// Returns the `SourceId` corresponding to this source.
+    fn source_id(&self) -> SourceId;
+
+    /// Returns the replaced `SourceId` corresponding to this source.
+    fn replaced_source_id(&self) -> SourceId {
+        self.source_id()
+    }
+
+    /// Returns whether or not this source will return summaries with
+    /// checksums listed.
+    fn supports_checksums(&self) -> bool;
+
+    /// Returns whether or not this source will return summaries with
+    /// the `precise` field in the source id listed.
+    fn requires_precise(&self) -> bool;
+
+    /// Attempts to find the packages that match a dependency request.
+    fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()>;
+
+    /// Attempts to find the packages that are close to a dependency request.
+    /// Each source gets to define what `close` means for it.
+    /// Path/Git sources may return all dependencies that are at that URI,
+    /// whereas an `Index` source may return dependencies that have the same canonicalization.
+    fn fuzzy_query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()>;
+
+    fn query_vec(&mut self, dep: &Dependency) -> CargoResult<Vec<Summary>> {
+        let mut ret = Vec::new();
+        self.query(dep, &mut |s| ret.push(s))?;
+        Ok(ret)
+    }
+
+    /// Performs any network operations required to get the entire list of all names,
+    /// versions and dependencies of packages managed by the `Source`.
+    fn update(&mut self) -> CargoResult<()>;
+
+    /// Fetches the full package for each name and version specified.
+    fn download(&mut self, package: PackageId) -> CargoResult<MaybePackage>;
+
+    fn download_now(self: Box<Self>, package: PackageId, config: &Config) -> CargoResult<Package>
+    where
+        Self: std::marker::Sized,
+    {
+        let mut sources = SourceMap::new();
+        sources.insert(self);
+        let pkg_set = PackageSet::new(&[package], sources, config)?;
+        Ok(pkg_set.get_one(package)?.clone())
+    }
+
+    fn finish_download(&mut self, package: PackageId, contents: Vec<u8>) -> CargoResult<Package>;
+
+    /// Generates a unique string which represents the fingerprint of the
+    /// current state of the source.
+    ///
+    /// This fingerprint is used to determine the "freshness" of the source
+    /// later on. It must be guaranteed that the fingerprint of a source is
+    /// constant if and only if the output product will remain constant.
+ /// + /// The `pkg` argument is the package which this fingerprint should only be + /// interested in for when this source may contain multiple packages. + fn fingerprint(&self, pkg: &Package) -> CargoResult; + + /// If this source supports it, verifies the source of the package + /// specified. + /// + /// Note that the source may also have performed other checksum-based + /// verification during the `download` step, but this is intended to be run + /// just before a crate is compiled so it may perform more expensive checks + /// which may not be cacheable. + fn verify(&self, _pkg: PackageId) -> CargoResult<()> { + Ok(()) + } + + /// Describes this source in a human readable fashion, used for display in + /// resolver error messages currently. + fn describe(&self) -> String; + + /// Returns whether a source is being replaced by another here. + fn is_replaced(&self) -> bool { + false + } + + /// Add a number of crates that should be whitelisted for showing up during + /// queries, even if they are yanked. Currently only applies to registry + /// sources. + fn add_to_yanked_whitelist(&mut self, pkgs: &[PackageId]); +} + +pub enum MaybePackage { + Ready(Package), + Download { url: String, descriptor: String }, +} + +impl<'a, T: Source + ?Sized + 'a> Source for Box { + /// Forwards to `Source::source_id`. + fn source_id(&self) -> SourceId { + (**self).source_id() + } + + /// Forwards to `Source::replaced_source_id`. + fn replaced_source_id(&self) -> SourceId { + (**self).replaced_source_id() + } + + /// Forwards to `Source::supports_checksums`. + fn supports_checksums(&self) -> bool { + (**self).supports_checksums() + } + + /// Forwards to `Source::requires_precise`. + fn requires_precise(&self) -> bool { + (**self).requires_precise() + } + + /// Forwards to `Source::query`. + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + (**self).query(dep, f) + } + + /// Forwards to `Source::query`. + fn fuzzy_query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + (**self).fuzzy_query(dep, f) + } + + /// Forwards to `Source::update`. + fn update(&mut self) -> CargoResult<()> { + (**self).update() + } + + /// Forwards to `Source::download`. + fn download(&mut self, id: PackageId) -> CargoResult { + (**self).download(id) + } + + fn finish_download(&mut self, id: PackageId, data: Vec) -> CargoResult { + (**self).finish_download(id, data) + } + + /// Forwards to `Source::fingerprint`. + fn fingerprint(&self, pkg: &Package) -> CargoResult { + (**self).fingerprint(pkg) + } + + /// Forwards to `Source::verify`. 
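+    // This blanket impl is what lets a `Box<dyn Source>` stand in wherever a
+    // concrete `Source` is expected; a hypothetical helper over a `SourceMap`:
+    //
+    //     fn update_all(sources: &mut SourceMap<'_>) -> CargoResult<()> {
+    //         for (_, src) in sources.sources_mut() {
+    //             src.update()?;
+    //         }
+    //         Ok(())
+    //     }
+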
+ fn verify(&self, pkg: PackageId) -> CargoResult<()> { + (**self).verify(pkg) + } + + fn describe(&self) -> String { + (**self).describe() + } + + fn is_replaced(&self) -> bool { + (**self).is_replaced() + } + + fn add_to_yanked_whitelist(&mut self, pkgs: &[PackageId]) { + (**self).add_to_yanked_whitelist(pkgs); + } +} + +impl<'a, T: Source + ?Sized + 'a> Source for &'a mut T { + fn source_id(&self) -> SourceId { + (**self).source_id() + } + + fn replaced_source_id(&self) -> SourceId { + (**self).replaced_source_id() + } + + fn supports_checksums(&self) -> bool { + (**self).supports_checksums() + } + + fn requires_precise(&self) -> bool { + (**self).requires_precise() + } + + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + (**self).query(dep, f) + } + + fn fuzzy_query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + (**self).fuzzy_query(dep, f) + } + + fn update(&mut self) -> CargoResult<()> { + (**self).update() + } + + fn download(&mut self, id: PackageId) -> CargoResult { + (**self).download(id) + } + + fn finish_download(&mut self, id: PackageId, data: Vec) -> CargoResult { + (**self).finish_download(id, data) + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + (**self).fingerprint(pkg) + } + + fn verify(&self, pkg: PackageId) -> CargoResult<()> { + (**self).verify(pkg) + } + + fn describe(&self) -> String { + (**self).describe() + } + + fn is_replaced(&self) -> bool { + (**self).is_replaced() + } + + fn add_to_yanked_whitelist(&mut self, pkgs: &[PackageId]) { + (**self).add_to_yanked_whitelist(pkgs); + } +} + +/// A `HashMap` of `SourceId` -> `Box`. +#[derive(Default)] +pub struct SourceMap<'src> { + map: HashMap>, +} + +// `impl Debug` on source requires specialization, if even desirable at all. +impl<'src> fmt::Debug for SourceMap<'src> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "SourceMap ")?; + f.debug_set().entries(self.map.keys()).finish() + } +} + +impl<'src> SourceMap<'src> { + /// Creates an empty map. + pub fn new() -> SourceMap<'src> { + SourceMap { + map: HashMap::new(), + } + } + + /// Like `HashMap::contains_key`. + pub fn contains(&self, id: SourceId) -> bool { + self.map.contains_key(&id) + } + + /// Like `HashMap::get`. + pub fn get(&self, id: SourceId) -> Option<&(dyn Source + 'src)> { + let source = self.map.get(&id); + + source.map(|s| { + let s: &(dyn Source + 'src) = &**s; + s + }) + } + + /// Like `HashMap::get_mut`. + pub fn get_mut(&mut self, id: SourceId) -> Option<&mut (dyn Source + 'src)> { + self.map.get_mut(&id).map(|s| { + let s: &mut (dyn Source + 'src) = &mut **s; + s + }) + } + + /// Like `HashMap::get`, but first calculates the `SourceId` from a `PackageId`. + pub fn get_by_package_id(&self, pkg_id: PackageId) -> Option<&(dyn Source + 'src)> { + self.get(pkg_id.source_id()) + } + + /// Like `HashMap::insert`, but derives the `SourceId` key from the `Source`. + pub fn insert(&mut self, source: Box) { + let id = source.source_id(); + self.map.insert(id, source); + } + + /// Like `HashMap::is_empty`. + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Like `HashMap::len`. + pub fn len(&self) -> usize { + self.map.len() + } + + /// Like `HashMap::values`. + pub fn sources<'a>(&'a self) -> impl Iterator> { + self.map.values() + } + + /// Like `HashMap::iter_mut`. 
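+    // A usage sketch (hypothetical `boxed_source` and `pkg_id`): the key is
+    // derived from the source itself, so package lookups reduce to id lookups:
+    //
+    //     let mut map = SourceMap::new();
+    //     map.insert(boxed_source);                // key = boxed_source.source_id()
+    //     let src = map.get_by_package_id(pkg_id); // = map.get(pkg_id.source_id())
+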
+ pub fn sources_mut<'a>( + &'a mut self, + ) -> impl Iterator { + self.map.iter_mut().map(|(a, b)| (a, &mut **b)) + } +} diff --git a/src/cargo/core/source/source_id.rs b/src/cargo/core/source/source_id.rs new file mode 100644 index 000000000..dd5f5ff9a --- /dev/null +++ b/src/cargo/core/source/source_id.rs @@ -0,0 +1,589 @@ +use std::cmp::{self, Ordering}; +use std::collections::HashSet; +use std::fmt::{self, Formatter}; +use std::hash::{self, Hash}; +use std::path::Path; +use std::ptr; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering::SeqCst; +use std::sync::Mutex; + +use log::trace; +use serde::de; +use serde::ser; +use url::Url; + +use crate::core::PackageId; +use crate::ops; +use crate::sources::git; +use crate::sources::DirectorySource; +use crate::sources::{GitSource, PathSource, RegistrySource, CRATES_IO_INDEX}; +use crate::util::{CargoResult, Config, ToUrl}; + +lazy_static::lazy_static! { + static ref SOURCE_ID_CACHE: Mutex> = Mutex::new(HashSet::new()); +} + +/// Unique identifier for a source of packages. +#[derive(Clone, Copy, Eq, Debug)] +pub struct SourceId { + inner: &'static SourceIdInner, +} + +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +struct SourceIdInner { + /// The source URL. + url: Url, + /// The result of `git::canonicalize_url()` on `url` field. + canonical_url: Url, + /// The source kind. + kind: Kind, + /// For example, the exact Git revision of the specified branch for a Git Source. + precise: Option, + /// Name of the registry source for alternative registries + /// WARNING: this is not always set for alt-registries when the name is + /// not known. + name: Option, +} + +/// The possible kinds of code source. Along with `SourceIdInner`, this fully defines the +/// source. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +enum Kind { + /// A git repository. + Git(GitReference), + /// A local path.. + Path, + /// A remote registry. + Registry, + /// A local filesystem-based registry. + LocalRegistry, + /// A directory-based registry. + Directory, +} + +/// Information to find a specific commit in a Git repository. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum GitReference { + /// From a tag. + Tag(String), + /// From the HEAD of a branch. + Branch(String), + /// From a specific revision. + Rev(String), +} + +impl SourceId { + /// Creates a `SourceId` object from the kind and URL. + /// + /// The canonical url will be calculated, but the precise field will not + fn new(kind: Kind, url: Url) -> CargoResult { + let source_id = SourceId::wrap(SourceIdInner { + kind, + canonical_url: git::canonicalize_url(&url)?, + url, + precise: None, + name: None, + }); + Ok(source_id) + } + + fn wrap(inner: SourceIdInner) -> SourceId { + let mut cache = SOURCE_ID_CACHE.lock().unwrap(); + let inner = cache.get(&inner).cloned().unwrap_or_else(|| { + let inner = Box::leak(Box::new(inner)); + cache.insert(inner); + inner + }); + SourceId { inner } + } + + /// Parses a source URL and returns the corresponding ID. 
+ /// + /// ## Example + /// + /// ``` + /// use cargo::core::SourceId; + /// SourceId::from_url("git+https://github.com/alexcrichton/\ + /// libssh2-static-sys#80e71a3021618eb05\ + /// 656c58fb7c5ef5f12bc747f"); + /// ``` + pub fn from_url(string: &str) -> CargoResult { + let mut parts = string.splitn(2, '+'); + let kind = parts.next().unwrap(); + let url = parts + .next() + .ok_or_else(|| failure::format_err!("invalid source `{}`", string))?; + + match kind { + "git" => { + let mut url = url.to_url()?; + let mut reference = GitReference::Branch("master".to_string()); + for (k, v) in url.query_pairs() { + match &k[..] { + // Map older 'ref' to branch. + "branch" | "ref" => reference = GitReference::Branch(v.into_owned()), + + "rev" => reference = GitReference::Rev(v.into_owned()), + "tag" => reference = GitReference::Tag(v.into_owned()), + _ => {} + } + } + let precise = url.fragment().map(|s| s.to_owned()); + url.set_fragment(None); + url.set_query(None); + Ok(SourceId::for_git(&url, reference)?.with_precise(precise)) + } + "registry" => { + let url = url.to_url()?; + Ok(SourceId::new(Kind::Registry, url)?.with_precise(Some("locked".to_string()))) + } + "path" => { + let url = url.to_url()?; + SourceId::new(Kind::Path, url) + } + kind => Err(failure::format_err!( + "unsupported source protocol: {}", + kind + )), + } + } + + /// A view of the `SourceId` that can be `Display`ed as a URL. + pub fn to_url(&self) -> SourceIdToUrl<'_> { + SourceIdToUrl { + inner: &*self.inner, + } + } + + /// Creates a `SourceId` from a filesystem path. + /// + /// `path`: an absolute path. + pub fn for_path(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::Path, url) + } + + /// Creates a `SourceId` from a Git reference. + pub fn for_git(url: &Url, reference: GitReference) -> CargoResult { + SourceId::new(Kind::Git(reference), url.clone()) + } + + /// Creates a SourceId from a registry URL. + pub fn for_registry(url: &Url) -> CargoResult { + SourceId::new(Kind::Registry, url.clone()) + } + + /// Creates a SourceId from a local registry path. + pub fn for_local_registry(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::LocalRegistry, url) + } + + /// Creates a `SourceId` from a directory path. + pub fn for_directory(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::Directory, url) + } + + /// Returns the `SourceId` corresponding to the main repository. + /// + /// This is the main cargo registry by default, but it can be overridden in + /// a `.cargo/config`. + pub fn crates_io(config: &Config) -> CargoResult { + config.crates_io_source_id(|| { + let cfg = ops::registry_configuration(config, None)?; + let url = if let Some(ref index) = cfg.index { + static WARNED: AtomicBool = AtomicBool::new(false); + if !WARNED.swap(true, SeqCst) { + config.shell().warn( + "custom registry support via \ + the `registry.index` configuration is \ + being removed, this functionality \ + will not work in the future", + )?; + } + &index[..] + } else { + CRATES_IO_INDEX + }; + let url = url.to_url()?; + SourceId::for_registry(&url) + }) + } + + pub fn alt_registry(config: &Config, key: &str) -> CargoResult { + let url = config.get_registry_index(key)?; + Ok(SourceId::wrap(SourceIdInner { + kind: Kind::Registry, + canonical_url: git::canonicalize_url(&url)?, + url, + precise: None, + name: Some(key.to_string()), + })) + } + + /// Gets this source URL. 
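+    // Illustrative inputs for `from_url` above (example URLs only):
+    //
+    //     "path+file:///home/user/project"
+    //     "git+https://github.com/user/repo?branch=next#abc123"   // fragment -> `precise`
+    //     "registry+https://github.com/rust-lang/crates.io-index" // `precise` = "locked"
+    //
+    // The piece before the `+` selects the kind; the rest must parse as a URL.
+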
+    pub fn url(&self) -> &Url {
+        &self.inner.url
+    }
+
+    pub fn display_registry(self) -> String {
+        if self.is_default_registry() {
+            "crates.io index".to_string()
+        } else {
+            format!("`{}` index", url_display(self.url()))
+        }
+    }
+
+    /// Returns `true` if this source is from a filesystem path.
+    pub fn is_path(self) -> bool {
+        self.inner.kind == Kind::Path
+    }
+
+    /// Returns `true` if this source is from a registry (either local or not).
+    pub fn is_registry(self) -> bool {
+        match self.inner.kind {
+            Kind::Registry | Kind::LocalRegistry => true,
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if this source is from a Git repository.
+    pub fn is_git(self) -> bool {
+        match self.inner.kind {
+            Kind::Git(_) => true,
+            _ => false,
+        }
+    }
+
+    /// Creates an implementation of `Source` corresponding to this ID.
+    pub fn load<'a>(
+        self,
+        config: &'a Config,
+        yanked_whitelist: &HashSet<PackageId>,
+    ) -> CargoResult<Box<dyn super::Source + 'a>> {
+        trace!("loading SourceId; {}", self);
+        match self.inner.kind {
+            Kind::Git(..) => Ok(Box::new(GitSource::new(self, config)?)),
+            Kind::Path => {
+                let path = match self.inner.url.to_file_path() {
+                    Ok(p) => p,
+                    Err(()) => panic!("path sources cannot be remote"),
+                };
+                Ok(Box::new(PathSource::new(&path, self, config)))
+            }
+            Kind::Registry => Ok(Box::new(RegistrySource::remote(
+                self,
+                yanked_whitelist,
+                config,
+            ))),
+            Kind::LocalRegistry => {
+                let path = match self.inner.url.to_file_path() {
+                    Ok(p) => p,
+                    Err(()) => panic!("path sources cannot be remote"),
+                };
+                Ok(Box::new(RegistrySource::local(
+                    self,
+                    &path,
+                    yanked_whitelist,
+                    config,
+                )))
+            }
+            Kind::Directory => {
+                let path = match self.inner.url.to_file_path() {
+                    Ok(p) => p,
+                    Err(()) => panic!("path sources cannot be remote"),
+                };
+                Ok(Box::new(DirectorySource::new(&path, self, config)))
+            }
+        }
+    }
+
+    /// Gets the value of the precise field.
+    pub fn precise(self) -> Option<&'static str> {
+        self.inner.precise.as_ref().map(|s| &s[..])
+    }
+
+    /// Gets the Git reference if this is a git source, otherwise `None`.
+    pub fn git_reference(self) -> Option<&'static GitReference> {
+        match self.inner.kind {
+            Kind::Git(ref s) => Some(s),
+            _ => None,
+        }
+    }
+
+    /// Creates a new `SourceId` from this source with the given `precise`.
+    pub fn with_precise(self, v: Option<String>) -> SourceId {
+        SourceId::wrap(SourceIdInner {
+            precise: v,
+            ..(*self.inner).clone()
+        })
+    }
+
+    /// Returns `true` if the remote registry is the standard <https://crates.io>.
+    pub fn is_default_registry(self) -> bool {
+        match self.inner.kind {
+            Kind::Registry => {}
+            _ => return false,
+        }
+        self.inner.url.to_string() == CRATES_IO_INDEX
+    }
+
+    /// Hashes `self`.
+    ///
+    /// For paths, remove the workspace prefix so the same source will give the
+    /// same hash in different locations.
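+    // A small sketch of `with_precise` above (hypothetical revision string);
+    // the interned inner value is cloned and re-wrapped rather than mutated:
+    //
+    //     let pinned = source_id.with_precise(Some("abc1234".to_string()));
+    //     assert_eq!(pinned.precise(), Some("abc1234"));
+    //     assert_eq!(pinned, source_id); // `precise` is ignored by `PartialEq`
+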
+ pub fn stable_hash(self, workspace: &Path, into: &mut S) { + if self.is_path() { + if let Ok(p) = self + .inner + .url + .to_file_path() + .unwrap() + .strip_prefix(workspace) + { + self.inner.kind.hash(into); + p.to_str().unwrap().hash(into); + return; + } + } + self.hash(into) + } + + pub fn full_eq(self, other: SourceId) -> bool { + ptr::eq(self.inner, other.inner) + } + + pub fn full_hash(self, into: &mut S) { + ptr::NonNull::from(self.inner).hash(into) + } +} + +impl PartialOrd for SourceId { + fn partial_cmp(&self, other: &SourceId) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceId { + fn cmp(&self, other: &SourceId) -> Ordering { + self.inner.cmp(&other.inner) + } +} + +impl ser::Serialize for SourceId { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + if self.is_path() { + None::.serialize(s) + } else { + s.collect_str(&self.to_url()) + } + } +} + +impl<'de> de::Deserialize<'de> for SourceId { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + SourceId::from_url(&string).map_err(de::Error::custom) + } +} + +fn url_display(url: &Url) -> String { + if url.scheme() == "file" { + if let Ok(path) = url.to_file_path() { + if let Some(path_str) = path.to_str() { + return path_str.to_string(); + } + } + } + + url.as_str().to_string() +} + +impl fmt::Display for SourceId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self.inner.kind { + Kind::Git(ref reference) => { + // Don't replace the URL display for git references, + // because those are kind of expected to be URLs. + write!(f, "{}", self.inner.url)?; + if let Some(pretty) = reference.pretty_ref() { + write!(f, "?{}", pretty)?; + } + + if let Some(ref s) = self.inner.precise { + let len = cmp::min(s.len(), 8); + write!(f, "#{}", &s[..len])?; + } + Ok(()) + } + Kind::Path => write!(f, "{}", url_display(&self.inner.url)), + Kind::Registry => write!(f, "registry `{}`", url_display(&self.inner.url)), + Kind::LocalRegistry => write!(f, "registry `{}`", url_display(&self.inner.url)), + Kind::Directory => write!(f, "dir {}", url_display(&self.inner.url)), + } + } +} + +// Custom equality defined as canonical URL equality for git sources and +// URL equality for other sources, ignoring the `precise` and `name` fields. +impl PartialEq for SourceId { + fn eq(&self, other: &SourceId) -> bool { + if ptr::eq(self.inner, other.inner) { + return true; + } + if self.inner.kind != other.inner.kind { + return false; + } + if self.inner.url == other.inner.url { + return true; + } + + match (&self.inner.kind, &other.inner.kind) { + (Kind::Git(ref1), Kind::Git(ref2)) => { + ref1 == ref2 && self.inner.canonical_url == other.inner.canonical_url + } + _ => false, + } + } +} + +impl PartialOrd for SourceIdInner { + fn partial_cmp(&self, other: &SourceIdInner) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceIdInner { + fn cmp(&self, other: &SourceIdInner) -> Ordering { + match self.kind.cmp(&other.kind) { + Ordering::Equal => {} + ord => return ord, + } + match self.url.cmp(&other.url) { + Ordering::Equal => {} + ord => return ord, + } + match (&self.kind, &other.kind) { + (Kind::Git(ref1), Kind::Git(ref2)) => { + (ref1, &self.canonical_url).cmp(&(ref2, &other.canonical_url)) + } + _ => self.kind.cmp(&other.kind), + } + } +} + +// The hash of SourceId is used in the name of some Cargo folders, so shouldn't +// vary. 
`as_str` gives the serialisation of a url (which has a spec) and so +// insulates against possible changes in how the url crate does hashing. +impl Hash for SourceId { + fn hash(&self, into: &mut S) { + self.inner.kind.hash(into); + match self.inner.kind { + Kind::Git(_) => self.inner.canonical_url.as_str().hash(into), + _ => self.inner.url.as_str().hash(into), + } + } +} + +/// A `Display`able view into a `SourceId` that will write it as a url +pub struct SourceIdToUrl<'a> { + inner: &'a SourceIdInner, +} + +impl<'a> fmt::Display for SourceIdToUrl<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self.inner { + SourceIdInner { + kind: Kind::Path, + ref url, + .. + } => write!(f, "path+{}", url), + SourceIdInner { + kind: Kind::Git(ref reference), + ref url, + ref precise, + .. + } => { + write!(f, "git+{}", url)?; + if let Some(pretty) = reference.pretty_ref() { + write!(f, "?{}", pretty)?; + } + if let Some(precise) = precise.as_ref() { + write!(f, "#{}", precise)?; + } + Ok(()) + } + SourceIdInner { + kind: Kind::Registry, + ref url, + .. + } => write!(f, "registry+{}", url), + SourceIdInner { + kind: Kind::LocalRegistry, + ref url, + .. + } => write!(f, "local-registry+{}", url), + SourceIdInner { + kind: Kind::Directory, + ref url, + .. + } => write!(f, "directory+{}", url), + } + } +} + +impl GitReference { + /// Returns a `Display`able view of this git reference, or None if using + /// the head of the "master" branch + pub fn pretty_ref(&self) -> Option> { + match *self { + GitReference::Branch(ref s) if *s == "master" => None, + _ => Some(PrettyRef { inner: self }), + } + } +} + +/// A git reference that can be `Display`ed +pub struct PrettyRef<'a> { + inner: &'a GitReference, +} + +impl<'a> fmt::Display for PrettyRef<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self.inner { + GitReference::Branch(ref b) => write!(f, "branch={}", b), + GitReference::Tag(ref s) => write!(f, "tag={}", s), + GitReference::Rev(ref s) => write!(f, "rev={}", s), + } + } +} + +#[cfg(test)] +mod tests { + use super::{GitReference, Kind, SourceId}; + use crate::util::ToUrl; + + #[test] + fn github_sources_equal() { + let loc = "https://github.com/foo/bar".to_url().unwrap(); + let master = Kind::Git(GitReference::Branch("master".to_string())); + let s1 = SourceId::new(master.clone(), loc).unwrap(); + + let loc = "git://github.com/foo/bar".to_url().unwrap(); + let s2 = SourceId::new(master, loc.clone()).unwrap(); + + assert_eq!(s1, s2); + + let foo = Kind::Git(GitReference::Branch("foo".to_string())); + let s3 = SourceId::new(foo, loc).unwrap(); + assert_ne!(s1, s3); + } +} diff --git a/src/cargo/core/summary.rs b/src/cargo/core/summary.rs new file mode 100644 index 000000000..65a1a6bfc --- /dev/null +++ b/src/cargo/core/summary.rs @@ -0,0 +1,411 @@ +use std::borrow::Borrow; +use std::collections::{BTreeMap, HashMap}; +use std::fmt::Display; +use std::mem; +use std::rc::Rc; + +use serde::{Serialize, Serializer}; + +use crate::core::interning::InternedString; +use crate::core::{Dependency, PackageId, SourceId}; +use semver::Version; + +use crate::util::CargoResult; + +/// Subset of a `Manifest`. Contains only the most important information about +/// a package. 
+///
+/// Summaries are cloned, and should not be mutated after creation.
+#[derive(Debug, Clone)]
+pub struct Summary {
+    inner: Rc<Inner>,
+}
+
+#[derive(Debug, Clone)]
+struct Inner {
+    package_id: PackageId,
+    dependencies: Vec<Dependency>,
+    features: FeatureMap,
+    checksum: Option<String>,
+    links: Option<InternedString>,
+    namespaced_features: bool,
+}
+
+impl Summary {
+    pub fn new<K>(
+        pkg_id: PackageId,
+        dependencies: Vec<Dependency>,
+        features: &BTreeMap<K, Vec<impl AsRef<str>>>,
+        links: Option<impl AsRef<str>>,
+        namespaced_features: bool,
+    ) -> CargoResult<Summary>
+    where
+        K: Borrow<str> + Ord + Display,
+    {
+        for dep in dependencies.iter() {
+            let feature = dep.name_in_toml();
+            if !namespaced_features && features.get(&*feature).is_some() {
+                failure::bail!(
+                    "Features and dependencies cannot have the \
+                     same name: `{}`",
+                    feature
+                )
+            }
+            if dep.is_optional() && !dep.is_transitive() {
+                failure::bail!(
+                    "Dev-dependencies are not allowed to be optional: `{}`",
+                    feature
+                )
+            }
+        }
+        let feature_map = build_feature_map(features, &dependencies, namespaced_features)?;
+        Ok(Summary {
+            inner: Rc::new(Inner {
+                package_id: pkg_id,
+                dependencies,
+                features: feature_map,
+                checksum: None,
+                links: links.map(|l| InternedString::new(l.as_ref())),
+                namespaced_features,
+            }),
+        })
+    }
+
+    pub fn package_id(&self) -> PackageId {
+        self.inner.package_id
+    }
+    pub fn name(&self) -> InternedString {
+        self.package_id().name()
+    }
+    pub fn version(&self) -> &Version {
+        self.package_id().version()
+    }
+    pub fn source_id(&self) -> SourceId {
+        self.package_id().source_id()
+    }
+    pub fn dependencies(&self) -> &[Dependency] {
+        &self.inner.dependencies
+    }
+    pub fn features(&self) -> &FeatureMap {
+        &self.inner.features
+    }
+    pub fn checksum(&self) -> Option<&str> {
+        self.inner.checksum.as_ref().map(|s| &s[..])
+    }
+    pub fn links(&self) -> Option<InternedString> {
+        self.inner.links
+    }
+    pub fn namespaced_features(&self) -> bool {
+        self.inner.namespaced_features
+    }
+
+    pub fn override_id(mut self, id: PackageId) -> Summary {
+        Rc::make_mut(&mut self.inner).package_id = id;
+        self
+    }
+
+    pub fn set_checksum(mut self, cksum: String) -> Summary {
+        Rc::make_mut(&mut self.inner).checksum = Some(cksum);
+        self
+    }
+
+    pub fn map_dependencies<F>(mut self, f: F) -> Summary
+    where
+        F: FnMut(Dependency) -> Dependency,
+    {
+        {
+            let slot = &mut Rc::make_mut(&mut self.inner).dependencies;
+            let deps = mem::replace(slot, Vec::new());
+            *slot = deps.into_iter().map(f).collect();
+        }
+        self
+    }
+
+    pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Summary {
+        let me = if self.package_id().source_id() == to_replace {
+            let new_id = self.package_id().with_source_id(replace_with);
+            self.override_id(new_id)
+        } else {
+            self
+        };
+        me.map_dependencies(|dep| dep.map_source(to_replace, replace_with))
+    }
+}
+
+impl PartialEq for Summary {
+    fn eq(&self, other: &Summary) -> bool {
+        self.inner.package_id == other.inner.package_id
+    }
+}
+
+// Checks features for errors, bailing out a `CargoResult::Err` if invalid,
+// and creates `FeatureValue`s for each feature.
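+// For intuition, a manifest section like the following (illustrative values):
+//
+//     [features]
+//     default = ["fancy"]
+//     fancy = ["serde/derive"]
+//
+// reaches this function as `{"default": ["fancy"], "fancy": ["serde/derive"]}`,
+// and each right-hand value is classified below as a feature, a crate, or a
+// crate feature.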
+fn build_feature_map( + features: &BTreeMap>>, + dependencies: &[Dependency], + namespaced: bool, +) -> CargoResult +where + K: Borrow + Ord + Display, +{ + use self::FeatureValue::*; + let mut dep_map = HashMap::new(); + for dep in dependencies.iter() { + dep_map + .entry(dep.name_in_toml()) + .or_insert_with(Vec::new) + .push(dep); + } + + let mut map = BTreeMap::new(); + for (feature, list) in features.iter() { + // If namespaced features is active and the key is the same as that of an + // optional dependency, that dependency must be included in the values. + // Thus, if a `feature` is found that has the same name as a dependency, we + // (a) bail out if the dependency is non-optional, and (b) we track if the + // feature requirements include the dependency `crate:feature` in the list. + // This is done with the `dependency_found` variable, which can only be + // false if features are namespaced and the current feature key is the same + // as the name of an optional dependency. If so, it gets set to true during + // iteration over the list if the dependency is found in the list. + let mut dependency_found = if namespaced { + match dep_map.get(feature.borrow()) { + Some(ref dep_data) => { + if !dep_data.iter().any(|d| d.is_optional()) { + failure::bail!( + "Feature `{}` includes the dependency of the same name, but this is \ + left implicit in the features included by this feature.\n\ + Additionally, the dependency must be marked as optional to be \ + included in the feature definition.\n\ + Consider adding `crate:{}` to this feature's requirements \ + and marking the dependency as `optional = true`", + feature, + feature + ) + } else { + false + } + } + None => true, + } + } else { + true + }; + + let mut values = vec![]; + for dep in list { + let val = FeatureValue::build( + InternedString::new(dep.as_ref()), + |fs| features.contains_key(fs.as_str()), + namespaced, + ); + + // Find data for the referenced dependency... + let dep_data = { + match val { + Feature(ref dep_name) | Crate(ref dep_name) | CrateFeature(ref dep_name, _) => { + dep_map.get(dep_name.as_str()) + } + } + }; + let is_optional_dep = dep_data + .iter() + .flat_map(|d| d.iter()) + .any(|d| d.is_optional()); + if let FeatureValue::Crate(ref dep_name) = val { + // If we have a dependency value, check if this is the dependency named + // the same as the feature that we were looking for. + if !dependency_found && feature.borrow() == dep_name.as_str() { + dependency_found = true; + } + } + + match (&val, dep_data.is_some(), is_optional_dep) { + // The value is a feature. If features are namespaced, this just means + // it's not prefixed with `crate:`, so we have to check whether the + // feature actually exist. If the feature is not defined *and* an optional + // dependency of the same name exists, the feature is defined implicitly + // here by adding it to the feature map, pointing to the dependency. + // If features are not namespaced, it's been validated as a feature already + // while instantiating the `FeatureValue` in `FeatureValue::build()`, so + // we don't have to do so here. + (&Feature(feat), _, true) => { + if namespaced && !features.contains_key(&*feat) { + map.insert(feat, vec![FeatureValue::Crate(feat)]); + } + } + // If features are namespaced and the value is not defined as a feature + // and there is no optional dependency of the same name, error out. + // If features are not namespaced, there must be an existing feature + // here (checked by `FeatureValue::build()`), so it will always be defined. 
+ (&Feature(feat), dep_exists, false) => { + if namespaced && !features.contains_key(&*feat) { + if dep_exists { + failure::bail!( + "Feature `{}` includes `{}` which is not defined as a feature.\n\ + A non-optional dependency of the same name is defined; consider \ + adding `optional = true` to its definition", + feature, + feat + ) + } else { + failure::bail!( + "Feature `{}` includes `{}` which is not defined as a feature", + feature, + feat + ) + } + } + } + // The value is a dependency. If features are namespaced, it is explicitly + // tagged as such (`crate:value`). If features are not namespaced, any value + // not recognized as a feature is pegged as a `Crate`. Here we handle the case + // where the dependency exists but is non-optional. It branches on namespaced + // just to provide the correct string for the crate dependency in the error. + (&Crate(ref dep), true, false) => { + if namespaced { + failure::bail!( + "Feature `{}` includes `crate:{}` which is not an \ + optional dependency.\nConsider adding \ + `optional = true` to the dependency", + feature, + dep + ) + } else { + failure::bail!( + "Feature `{}` depends on `{}` which is not an \ + optional dependency.\nConsider adding \ + `optional = true` to the dependency", + feature, + dep + ) + } + } + // If namespaced, the value was tagged as a dependency; if not namespaced, + // this could be anything not defined as a feature. This handles the case + // where no such dependency is actually defined; again, the branch on + // namespaced here is just to provide the correct string in the error. + (&Crate(ref dep), false, _) => { + if namespaced { + failure::bail!( + "Feature `{}` includes `crate:{}` which is not a known \ + dependency", + feature, + dep + ) + } else { + failure::bail!( + "Feature `{}` includes `{}` which is neither a dependency nor \ + another feature", + feature, + dep + ) + } + } + (&Crate(_), true, true) => {} + // If the value is a feature for one of the dependencies, bail out if no such + // dependency is actually defined in the manifest. + (&CrateFeature(ref dep, _), false, _) => failure::bail!( + "Feature `{}` requires a feature of `{}` which is not a \ + dependency", + feature, + dep + ), + (&CrateFeature(_, _), true, _) => {} + } + values.push(val); + } + + if !dependency_found { + // If we have not found the dependency of the same-named feature, we should + // bail here. + failure::bail!( + "Feature `{}` includes the optional dependency of the \ + same name, but this is left implicit in the features \ + included by this feature.\nConsider adding \ + `crate:{}` to this feature's requirements.", + feature, + feature + ) + } + + map.insert(InternedString::new(feature.borrow()), values); + } + Ok(map) +} + +/// FeatureValue represents the types of dependencies a feature can have: +/// +/// * Another feature +/// * An optional dependency +/// * A feature in a dependency +/// +/// The selection between these 3 things happens as part of the construction of the FeatureValue. 
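+// Illustrative mappings from manifest strings to variants (example names):
+//
+//     "fancy"        -> FeatureValue::Feature("fancy")
+//     "crate:serde"  -> FeatureValue::Crate("serde")          // namespaced form
+//     "serde/derive" -> FeatureValue::CrateFeature("serde", "derive")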
+#[derive(Clone, Debug)] +pub enum FeatureValue { + Feature(InternedString), + Crate(InternedString), + CrateFeature(InternedString, InternedString), +} + +impl FeatureValue { + fn build(feature: InternedString, is_feature: T, namespaced: bool) -> FeatureValue + where + T: Fn(InternedString) -> bool, + { + match (feature.find('/'), namespaced) { + (Some(pos), _) => { + let (dep, dep_feat) = feature.split_at(pos); + let dep_feat = &dep_feat[1..]; + FeatureValue::CrateFeature(InternedString::new(dep), InternedString::new(dep_feat)) + } + (None, true) if feature.starts_with("crate:") => { + FeatureValue::Crate(InternedString::new(&feature[6..])) + } + (None, true) => FeatureValue::Feature(feature), + (None, false) if is_feature(feature) => FeatureValue::Feature(feature), + (None, false) => FeatureValue::Crate(feature), + } + } + + pub fn new(feature: InternedString, s: &Summary) -> FeatureValue { + Self::build( + feature, + |fs| s.features().contains_key(&fs), + s.namespaced_features(), + ) + } + + pub fn to_string(&self, s: &Summary) -> String { + use self::FeatureValue::*; + match *self { + Feature(ref f) => f.to_string(), + Crate(ref c) => { + if s.namespaced_features() { + format!("crate:{}", &c) + } else { + c.to_string() + } + } + CrateFeature(ref c, ref f) => [c.as_ref(), f.as_ref()].join("/"), + } + } +} + +impl Serialize for FeatureValue { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use self::FeatureValue::*; + match *self { + Feature(ref f) => serializer.serialize_str(f), + Crate(ref c) => serializer.serialize_str(c), + CrateFeature(ref c, ref f) => { + serializer.serialize_str(&[c.as_ref(), f.as_ref()].join("/")) + } + } + } +} + +pub type FeatureMap = BTreeMap>; diff --git a/src/cargo/core/workspace.rs b/src/cargo/core/workspace.rs new file mode 100644 index 000000000..76bedfaf3 --- /dev/null +++ b/src/cargo/core/workspace.rs @@ -0,0 +1,910 @@ +use std::cell::RefCell; +use std::collections::hash_map::{Entry, HashMap}; +use std::collections::BTreeMap; +use std::path::{Path, PathBuf}; +use std::slice; + +use glob::glob; +use log::debug; +use url::Url; + +use crate::core::profiles::Profiles; +use crate::core::registry::PackageRegistry; +use crate::core::{Dependency, PackageIdSpec}; +use crate::core::{EitherManifest, Package, SourceId, VirtualManifest}; +use crate::ops; +use crate::sources::PathSource; +use crate::util::errors::{CargoResult, CargoResultExt, ManifestError}; +use crate::util::paths; +use crate::util::toml::read_manifest; +use crate::util::{Config, Filesystem}; + +/// The core abstraction in Cargo for working with a workspace of crates. +/// +/// A workspace is often created very early on and then threaded through all +/// other functions. It's typically through this object that the current +/// package is loaded and/or learned about. +#[derive(Debug)] +pub struct Workspace<'cfg> { + config: &'cfg Config, + + // This path is a path to where the current cargo subcommand was invoked + // from. That is the `--manifest-path` argument to Cargo, and + // points to the "main crate" that we're going to worry about. + current_manifest: PathBuf, + + // A list of packages found in this workspace. Always includes at least the + // package mentioned by `current_manifest`. + packages: Packages<'cfg>, + + // If this workspace includes more than one crate, this points to the root + // of the workspace. 
This is `None` in the case that `[workspace]` is + // missing, `package.workspace` is missing, and no `Cargo.toml` above + // `current_manifest` was found on the filesystem with `[workspace]`. + root_manifest: Option, + + // Shared target directory for all the packages of this workspace. + // `None` if the default path of `root/target` should be used. + target_dir: Option, + + // List of members in this workspace with a listing of all their manifest + // paths. The packages themselves can be looked up through the `packages` + // set above. + members: Vec, + + // The subset of `members` that are used by the + // `build`, `check`, `test`, and `bench` subcommands + // when no package is selected with `--package` / `-p` and `--all` + // is not used. + // + // This is set by the `default-members` config + // in the `[workspace]` section. + // When unset, this is the same as `members` for virtual workspaces + // (`--all` is implied) + // or only the root package for non-virtual workspaces. + default_members: Vec, + + // `true` if this is a temporary workspace created for the purposes of the + // `cargo install` or `cargo package` commands. + is_ephemeral: bool, + + // `true` if this workspace should enforce optional dependencies even when + // not needed; false if this workspace should only enforce dependencies + // needed by the current configuration (such as in cargo install). In some + // cases `false` also results in the non-enforcement of dev-dependencies. + require_optional_deps: bool, + + // A cache of loaded packages for particular paths which is disjoint from + // `packages` up above, used in the `load` method down below. + loaded_packages: RefCell>, +} + +// Separate structure for tracking loaded packages (to avoid loading anything +// twice), and this is separate to help appease the borrow checker. +#[derive(Debug)] +struct Packages<'cfg> { + config: &'cfg Config, + packages: HashMap, +} + +#[derive(Debug)] +enum MaybePackage { + Package(Package), + Virtual(VirtualManifest), +} + +/// Configuration of a workspace in a manifest. +#[derive(Debug, Clone)] +pub enum WorkspaceConfig { + /// Indicates that `[workspace]` was present and the members were + /// optionally specified as well. + Root(WorkspaceRootConfig), + + /// Indicates that `[workspace]` was present and the `root` field is the + /// optional value of `package.workspace`, if present. + Member { root: Option }, +} + +/// Intermediate configuration of a workspace root in a manifest. +/// +/// Knows the Workspace Root path, as well as `members` and `exclude` lists of path patterns, which +/// together tell if some path is recognized as a member by this root or not. +#[derive(Debug, Clone)] +pub struct WorkspaceRootConfig { + root_dir: PathBuf, + members: Option>, + default_members: Option>, + exclude: Vec, +} + +/// An iterator over the member packages of a workspace, returned by +/// `Workspace::members` +pub struct Members<'a, 'cfg: 'a> { + ws: &'a Workspace<'cfg>, + iter: slice::Iter<'a, PathBuf>, +} + +impl<'cfg> Workspace<'cfg> { + /// Creates a new workspace given the target manifest pointed to by + /// `manifest_path`. + /// + /// This function will construct the entire workspace by determining the + /// root and all member packages. It will then validate the workspace + /// before returning it, so `Ok` is only returned for valid workspaces. 
+    pub fn new(manifest_path: &Path, config: &'cfg Config) -> CargoResult<Workspace<'cfg>> {
+        let target_dir = config.target_dir()?;
+
+        let mut ws = Workspace {
+            config,
+            current_manifest: manifest_path.to_path_buf(),
+            packages: Packages {
+                config,
+                packages: HashMap::new(),
+            },
+            root_manifest: None,
+            target_dir,
+            members: Vec::new(),
+            default_members: Vec::new(),
+            is_ephemeral: false,
+            require_optional_deps: true,
+            loaded_packages: RefCell::new(HashMap::new()),
+        };
+        ws.root_manifest = ws.find_root(manifest_path)?;
+        ws.find_members()?;
+        ws.validate()?;
+        Ok(ws)
+    }
+
+    /// Creates a "temporary workspace" from one package which only contains
+    /// that package.
+    ///
+    /// This constructor will not touch the filesystem and only creates an
+    /// in-memory workspace. That is, all configuration is ignored, it's just
+    /// intended for that one package.
+    ///
+    /// This is currently only used in niche situations like `cargo install` or
+    /// `cargo package`.
+    pub fn ephemeral(
+        package: Package,
+        config: &'cfg Config,
+        target_dir: Option<Filesystem>,
+        require_optional_deps: bool,
+    ) -> CargoResult<Workspace<'cfg>> {
+        let mut ws = Workspace {
+            config,
+            current_manifest: package.manifest_path().to_path_buf(),
+            packages: Packages {
+                config,
+                packages: HashMap::new(),
+            },
+            root_manifest: None,
+            target_dir: None,
+            members: Vec::new(),
+            default_members: Vec::new(),
+            is_ephemeral: true,
+            require_optional_deps,
+            loaded_packages: RefCell::new(HashMap::new()),
+        };
+        {
+            let key = ws.current_manifest.parent().unwrap();
+            let package = MaybePackage::Package(package);
+            ws.packages.packages.insert(key.to_path_buf(), package);
+            ws.target_dir = if let Some(dir) = target_dir {
+                Some(dir)
+            } else {
+                ws.config.target_dir()?
+            };
+            ws.members.push(ws.current_manifest.clone());
+            ws.default_members.push(ws.current_manifest.clone());
+        }
+        Ok(ws)
+    }
+
+    /// Returns the current package of this workspace.
+    ///
+    /// Note that this can return an error if the current manifest is
+    /// actually a "virtual Cargo.toml", in which case an error is returned
+    /// indicating that something else should be passed.
+    pub fn current(&self) -> CargoResult<&Package> {
+        let pkg = self.current_opt().ok_or_else(|| {
+            failure::format_err!(
+                "manifest path `{}` is a virtual manifest, but this \
+                 command requires running against an actual package in \
+                 this workspace",
+                self.current_manifest.display()
+            )
+        })?;
+        Ok(pkg)
+    }
+
+    pub fn current_opt(&self) -> Option<&Package> {
+        match *self.packages.get(&self.current_manifest) {
+            MaybePackage::Package(ref p) => Some(p),
+            MaybePackage::Virtual(..) => None,
+        }
+    }
+
+    pub fn is_virtual(&self) -> bool {
+        match *self.packages.get(&self.current_manifest) {
+            MaybePackage::Package(..) => false,
+            MaybePackage::Virtual(..) => true,
+        }
+    }
+
+    /// Returns the `Config` this workspace is associated with.
+    pub fn config(&self) -> &'cfg Config {
+        self.config
+    }
+
+    pub fn profiles(&self) -> &Profiles {
+        match self.root_maybe() {
+            MaybePackage::Package(p) => p.manifest().profiles(),
+            MaybePackage::Virtual(vm) => vm.profiles(),
+        }
+    }
+
+    /// Returns the root path of this workspace.
+    ///
+    /// That is, this returns the path of the directory containing the
+    /// `Cargo.toml` which is the root of this workspace.
+    pub fn root(&self) -> &Path {
+        match self.root_manifest {
+            Some(ref p) => p,
+            None => &self.current_manifest,
+        }
+        .parent()
+        .unwrap()
+    }
+
+    /// Returns the root `Package` or `VirtualManifest`.
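+    // One consequence of the fallback in `root()` above, sketched: a
+    // standalone crate with no `[workspace]` is its own root, so
+    // (illustrative)
+    //
+    //     assert_eq!(ws.root(), manifest_path.parent().unwrap());
+    //
+    // and `target_dir()` just below then defaults to `<root>/target`.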
+ fn root_maybe(&self) -> &MaybePackage { + let root = self + .root_manifest + .as_ref() + .unwrap_or(&self.current_manifest); + self.packages.get(root) + } + + pub fn target_dir(&self) -> Filesystem { + self.target_dir + .clone() + .unwrap_or_else(|| Filesystem::new(self.root().join("target"))) + } + + /// Returns the root [replace] section of this workspace. + /// + /// This may be from a virtual crate or an actual crate. + pub fn root_replace(&self) -> &[(PackageIdSpec, Dependency)] { + match self.root_maybe() { + MaybePackage::Package(p) => p.manifest().replace(), + MaybePackage::Virtual(vm) => vm.replace(), + } + } + + /// Returns the root [patch] section of this workspace. + /// + /// This may be from a virtual crate or an actual crate. + pub fn root_patch(&self) -> &HashMap> { + match self.root_maybe() { + MaybePackage::Package(p) => p.manifest().patch(), + MaybePackage::Virtual(vm) => vm.patch(), + } + } + + /// Returns an iterator over all packages in this workspace + pub fn members<'a>(&'a self) -> Members<'a, 'cfg> { + Members { + ws: self, + iter: self.members.iter(), + } + } + + /// Returns an iterator over default packages in this workspace + pub fn default_members<'a>(&'a self) -> Members<'a, 'cfg> { + Members { + ws: self, + iter: self.default_members.iter(), + } + } + + /// Returns true if the package is a member of the workspace. + pub fn is_member(&self, pkg: &Package) -> bool { + self.members().any(|p| p == pkg) + } + + pub fn is_ephemeral(&self) -> bool { + self.is_ephemeral + } + + pub fn require_optional_deps(&self) -> bool { + self.require_optional_deps + } + + pub fn set_require_optional_deps<'a>( + &'a mut self, + require_optional_deps: bool, + ) -> &mut Workspace<'cfg> { + self.require_optional_deps = require_optional_deps; + self + } + + /// Finds the root of a workspace for the crate whose manifest is located + /// at `manifest_path`. + /// + /// This will parse the `Cargo.toml` at `manifest_path` and then interpret + /// the workspace configuration, optionally walking up the filesystem + /// looking for other workspace roots. + /// + /// Returns an error if `manifest_path` isn't actually a valid manifest or + /// if some other transient error happens. 
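+    // The two manifest shapes distinguished below (illustrative TOML):
+    //
+    //     [workspace]              # this manifest *is* the workspace root
+    //     members = ["crates/*"]
+    //
+    //     [package]
+    //     workspace = "../.."      # explicit pointer to the root manifest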
+ fn find_root(&mut self, manifest_path: &Path) -> CargoResult> { + fn read_root_pointer(member_manifest: &Path, root_link: &str) -> CargoResult { + let path = member_manifest + .parent() + .unwrap() + .join(root_link) + .join("Cargo.toml"); + debug!("find_root - pointer {}", path.display()); + Ok(paths::normalize_path(&path)) + }; + + { + let current = self.packages.load(manifest_path)?; + match *current.workspace_config() { + WorkspaceConfig::Root(_) => { + debug!("find_root - is root {}", manifest_path.display()); + return Ok(Some(manifest_path.to_path_buf())); + } + WorkspaceConfig::Member { + root: Some(ref path_to_root), + } => return Ok(Some(read_root_pointer(manifest_path, path_to_root)?)), + WorkspaceConfig::Member { root: None } => {} + } + } + + for path in paths::ancestors(manifest_path).skip(2) { + if path.ends_with("target/package") { + break; + } + + let ances_manifest_path = path.join("Cargo.toml"); + debug!("find_root - trying {}", ances_manifest_path.display()); + if ances_manifest_path.exists() { + match *self.packages.load(&ances_manifest_path)?.workspace_config() { + WorkspaceConfig::Root(ref ances_root_config) => { + debug!("find_root - found a root checking exclusion"); + if !ances_root_config.is_excluded(manifest_path) { + debug!("find_root - found!"); + return Ok(Some(ances_manifest_path)); + } + } + WorkspaceConfig::Member { + root: Some(ref path_to_root), + } => { + debug!("find_root - found pointer"); + return Ok(Some(read_root_pointer(&ances_manifest_path, path_to_root)?)); + } + WorkspaceConfig::Member { .. } => {} + } + } + + // Don't walk across `CARGO_HOME` when we're looking for the + // workspace root. Sometimes a package will be organized with + // `CARGO_HOME` pointing inside of the workspace root or in the + // current package, but we don't want to mistakenly try to put + // crates.io crates into the workspace by accident. + if self.config.home() == path { + break; + } + } + + Ok(None) + } + + /// After the root of a workspace has been located, probes for all members + /// of a workspace. + /// + /// If the `workspace.members` configuration is present, then this just + /// verifies that those are all valid packages to point to. Otherwise, this + /// will transitively follow all `path` dependencies looking for members of + /// the workspace. + fn find_members(&mut self) -> CargoResult<()> { + let root_manifest_path = match self.root_manifest { + Some(ref path) => path.clone(), + None => { + debug!("find_members - only me as a member"); + self.members.push(self.current_manifest.clone()); + self.default_members.push(self.current_manifest.clone()); + return Ok(()); + } + }; + + let members_paths; + let default_members_paths; + { + let root_package = self.packages.load(&root_manifest_path)?; + match *root_package.workspace_config() { + WorkspaceConfig::Root(ref root_config) => { + members_paths = root_config + .members_paths(root_config.members.as_ref().unwrap_or(&vec![]))?; + default_members_paths = if let Some(ref default) = root_config.default_members { + Some(root_config.members_paths(default)?) 
+                    } else {
+                        None
+                    }
+                }
+                _ => failure::bail!(
+                    "root of a workspace inferred but wasn't a root: {}",
+                    root_manifest_path.display()
+                ),
+            }
+        }
+
+        for path in members_paths {
+            self.find_path_deps(&path.join("Cargo.toml"), &root_manifest_path, false)?;
+        }
+
+        if let Some(default) = default_members_paths {
+            for path in default {
+                let manifest_path = paths::normalize_path(&path.join("Cargo.toml"));
+                if !self.members.contains(&manifest_path) {
+                    failure::bail!(
+                        "package `{}` is listed in workspace’s default-members \
+                         but is not a member.",
+                        path.display()
+                    )
+                }
+                self.default_members.push(manifest_path)
+            }
+        } else if self.is_virtual() {
+            self.default_members = self.members.clone()
+        } else {
+            self.default_members.push(self.current_manifest.clone())
+        }
+
+        self.find_path_deps(&root_manifest_path, &root_manifest_path, false)
+    }
+
+    fn find_path_deps(
+        &mut self,
+        manifest_path: &Path,
+        root_manifest: &Path,
+        is_path_dep: bool,
+    ) -> CargoResult<()> {
+        let manifest_path = paths::normalize_path(manifest_path);
+        if self.members.contains(&manifest_path) {
+            return Ok(());
+        }
+        if is_path_dep
+            && !manifest_path.parent().unwrap().starts_with(self.root())
+            && self.find_root(&manifest_path)? != self.root_manifest
+        {
+            // If `manifest_path` is a path dependency outside of the workspace,
+            // don't add it, or any of its dependencies, as members.
+            return Ok(());
+        }
+
+        if let WorkspaceConfig::Root(ref root_config) =
+            *self.packages.load(root_manifest)?.workspace_config()
+        {
+            if root_config.is_excluded(&manifest_path) {
+                return Ok(());
+            }
+        }
+
+        debug!("find_members - {}", manifest_path.display());
+        self.members.push(manifest_path.clone());
+
+        let candidates = {
+            let pkg = match *self.packages.load(&manifest_path)? {
+                MaybePackage::Package(ref p) => p,
+                MaybePackage::Virtual(_) => return Ok(()),
+            };
+            pkg.dependencies()
+                .iter()
+                .map(|d| d.source_id())
+                .filter(|d| d.is_path())
+                .filter_map(|d| d.url().to_file_path().ok())
+                .map(|p| p.join("Cargo.toml"))
+                .collect::<Vec<_>>()
+        };
+        for candidate in candidates {
+            self.find_path_deps(&candidate, root_manifest, true)
+                .map_err(|err| ManifestError::new(err, manifest_path.clone()))?;
+        }
+        Ok(())
+    }
+
+    /// Validates a workspace, ensuring that a number of invariants are upheld:
+    ///
+    /// 1. A workspace only has one root.
+    /// 2. All workspace members agree on this one root as the root.
+    /// 3. The current crate is a member of this workspace.
+    fn validate(&mut self) -> CargoResult<()> {
+        // Validate config profiles only once per workspace.
+        let features = match self.root_maybe() {
+            MaybePackage::Package(p) => p.manifest().features(),
+            MaybePackage::Virtual(vm) => vm.features(),
+        };
+        let mut warnings = Vec::new();
+        self.config.profiles()?.validate(features, &mut warnings)?;
+        for warning in warnings {
+            self.config.shell().warn(&warning)?;
+        }
+
+        // The rest of the checks require a VirtualManifest or multiple members.
+        if self.root_manifest.is_none() {
+            return Ok(());
+        }
+
+        let mut roots = Vec::new();
+        {
+            let mut names = BTreeMap::new();
+            for member in self.members.iter() {
+                let package = self.packages.get(member);
+                match *package.workspace_config() {
+                    WorkspaceConfig::Root(_) => {
+                        roots.push(member.parent().unwrap().to_path_buf());
+                    }
+                    WorkspaceConfig::Member { .. } => {}
+                }
+                let name = match *package {
+                    MaybePackage::Package(ref p) => p.name(),
+                    MaybePackage::Virtual(_) => continue,
+                };
+                if let Some(prev) = names.insert(name, member) {
+                    failure::bail!(
+                        "two packages named `{}` in this workspace:\n\
+                         - {}\n\
+                         - {}",
+                        name,
+                        prev.display(),
+                        member.display()
+                    );
+                }
+            }
+        }
+
+        match roots.len() {
+            0 => failure::bail!(
+                "`package.workspace` configuration points to a crate \
+                 which is not configured with [workspace]: \n\
+                 configuration at: {}\n\
+                 points to: {}",
+                self.current_manifest.display(),
+                self.root_manifest.as_ref().unwrap().display()
+            ),
+            1 => {}
+            _ => {
+                failure::bail!(
+                    "multiple workspace roots found in the same workspace:\n{}",
+                    roots
+                        .iter()
+                        .map(|r| format!(" {}", r.display()))
+                        .collect::<Vec<String>>()
+                        .join("\n")
+                );
+            }
+        }
+
+        for member in self.members.clone() {
+            let root = self.find_root(&member)?;
+            if root == self.root_manifest {
+                continue;
+            }
+
+            match root {
+                Some(root) => {
+                    failure::bail!(
+                        "package `{}` is a member of the wrong workspace\n\
+                         expected: {}\n\
+                         actual: {}",
+                        member.display(),
+                        self.root_manifest.as_ref().unwrap().display(),
+                        root.display()
+                    );
+                }
+                None => {
+                    failure::bail!(
+                        "workspace member `{}` is not hierarchically below \
+                         the workspace root `{}`",
+                        member.display(),
+                        self.root_manifest.as_ref().unwrap().display()
+                    );
+                }
+            }
+        }
+
+        if !self.members.contains(&self.current_manifest) {
+            let root = self.root_manifest.as_ref().unwrap();
+            let root_dir = root.parent().unwrap();
+            let current_dir = self.current_manifest.parent().unwrap();
+            let root_pkg = self.packages.get(root);
+
+            // FIXME: Make this more generic by using a relative path resolver between member and
+            // root.
+            let members_msg = match current_dir.strip_prefix(root_dir) {
+                Ok(rel) => format!(
+                    "this may be fixable by adding `{}` to the \
+                     `workspace.members` array of the manifest \
+                     located at: {}",
+                    rel.display(),
+                    root.display()
+                ),
+                Err(_) => format!(
+                    "this may be fixable by adding a member to \
+                     the `workspace.members` array of the \
+                     manifest located at: {}",
+                    root.display()
+                ),
+            };
+            let extra = match *root_pkg {
+                MaybePackage::Virtual(_) => members_msg,
+                MaybePackage::Package(ref p) => {
+                    let has_members_list = match *p.manifest().workspace_config() {
+                        WorkspaceConfig::Root(ref root_config) => root_config.has_members_list(),
+                        WorkspaceConfig::Member { .. } => unreachable!(),
+                    };
+                    if !has_members_list {
+                        format!(
+                            "this may be fixable by ensuring that this \
+                             crate is depended on by the workspace \
+                             root: {}",
+                            root.display()
+                        )
+                    } else {
+                        members_msg
+                    }
+                }
+            };
+            failure::bail!(
+                "current package believes it's in a workspace when it's not:\n\
+                 current: {}\n\
+                 workspace: {}\n\n{}",
+                self.current_manifest.display(),
+                root.display(),
+                extra
+            );
+        }
+
+        if let Some(ref root_manifest) = self.root_manifest {
+            for pkg in self
+                .members()
+                .filter(|p| p.manifest_path() != root_manifest)
+            {
+                let manifest = pkg.manifest();
+                let emit_warning = |what| -> CargoResult<()> {
+                    let msg = format!(
+                        "{} for the non root package will be ignored, \
+                         specify {} at the workspace root:\n\
+                         package: {}\n\
+                         workspace: {}",
+                        what,
+                        what,
+                        pkg.manifest_path().display(),
+                        root_manifest.display(),
+                    );
+                    self.config.shell().warn(&msg)
+                };
+                if manifest.original().has_profiles() {
+                    emit_warning("profiles")?;
+                }
+                if !manifest.replace().is_empty() {
+                    emit_warning("replace")?;
+                }
+                if !manifest.patch().is_empty() {
+                    emit_warning("patch")?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    pub fn load(&self, manifest_path: &Path) -> CargoResult<Package> {
+        match self.packages.maybe_get(manifest_path) {
+            Some(&MaybePackage::Package(ref p)) => return Ok(p.clone()),
+            Some(&MaybePackage::Virtual(_)) => failure::bail!("cannot load workspace root"),
+            None => {}
+        }
+
+        let mut loaded = self.loaded_packages.borrow_mut();
+        if let Some(p) = loaded.get(manifest_path).cloned() {
+            return Ok(p);
+        }
+        let source_id = SourceId::for_path(manifest_path.parent().unwrap())?;
+        let (package, _nested_paths) = ops::read_package(manifest_path, source_id, self.config)?;
+        loaded.insert(manifest_path.to_path_buf(), package.clone());
+        Ok(package)
+    }
+
+    /// Preload the provided registry with already loaded packages.
+    ///
+    /// A workspace may load packages during construction/parsing/early phases
+    /// for various operations, and this preload step avoids doubly-loading and
+    /// parsing crates on the filesystem by inserting them all into the registry
+    /// with their in-memory formats.
+    pub fn preload(&self, registry: &mut PackageRegistry<'cfg>) {
+        // These can get weird as this generally represents a workspace during
+        // `cargo install`. Things like git repositories will actually have a
+        // `PathSource` with multiple entries in it, so the logic below is
+        // mostly just an optimization for normal `cargo build` in workspaces
+        // during development.
+        if self.is_ephemeral {
+            return;
+        }
+
+        for pkg in self.packages.packages.values() {
+            let pkg = match *pkg {
+                MaybePackage::Package(ref p) => p.clone(),
+                MaybePackage::Virtual(_) => continue,
+            };
+            let mut src = PathSource::new(
+                pkg.manifest_path(),
+                pkg.package_id().source_id(),
+                self.config,
+            );
+            src.preload_with(pkg);
+            registry.add_preloaded(Box::new(src));
+        }
+    }
+
+    pub fn emit_warnings(&self) -> CargoResult<()> {
+        for (path, maybe_pkg) in &self.packages.packages {
+            let warnings = match maybe_pkg {
+                MaybePackage::Package(pkg) => pkg.manifest().warnings().warnings(),
+                MaybePackage::Virtual(vm) => vm.warnings().warnings(),
+            };
+            let path = path.join("Cargo.toml");
+            for warning in warnings {
+                if warning.is_critical {
+                    let err = failure::format_err!("{}", warning.message);
+                    let cx =
+                        failure::format_err!("failed to parse manifest at `{}`", path.display());
+                    return Err(err.context(cx).into());
+                } else {
+                    let msg = if self.root_manifest.is_none() {
+                        warning.message.to_string()
+                    } else {
+                        // In a workspace, it can be confusing where a warning
+                        // originated, so include the path.
+                        format!("{}: {}", path.display(), warning.message)
+                    };
+                    self.config.shell().warn(msg)?
+                }
+            }
+        }
+        Ok(())
+    }
+}
+
+impl<'cfg> Packages<'cfg> {
+    fn get(&self, manifest_path: &Path) -> &MaybePackage {
+        self.maybe_get(manifest_path).unwrap()
+    }
+
+    fn maybe_get(&self, manifest_path: &Path) -> Option<&MaybePackage> {
+        self.packages.get(manifest_path.parent().unwrap())
+    }
+
+    fn load(&mut self, manifest_path: &Path) -> CargoResult<&MaybePackage> {
+        let key = manifest_path.parent().unwrap();
+        match self.packages.entry(key.to_path_buf()) {
+            Entry::Occupied(e) => Ok(e.into_mut()),
+            Entry::Vacant(v) => {
+                let source_id = SourceId::for_path(key)?;
+                let (manifest, _nested_paths) =
+                    read_manifest(manifest_path, source_id, self.config)?;
+                Ok(v.insert(match manifest {
+                    EitherManifest::Real(manifest) => {
+                        MaybePackage::Package(Package::new(manifest, manifest_path))
+                    }
+                    EitherManifest::Virtual(vm) => MaybePackage::Virtual(vm),
+                }))
+            }
+        }
+    }
+}
+
+impl<'a, 'cfg> Iterator for Members<'a, 'cfg> {
+    type Item = &'a Package;
+
+    fn next(&mut self) -> Option<&'a Package> {
+        loop {
+            let next = self.iter.next().map(|path| self.ws.packages.get(path));
+            match next {
+                Some(&MaybePackage::Package(ref p)) => return Some(p),
+                Some(&MaybePackage::Virtual(_)) => {}
+                None => return None,
+            }
+        }
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (_, upper) = self.iter.size_hint();
+        (0, upper)
+    }
+}
+
+impl MaybePackage {
+    fn workspace_config(&self) -> &WorkspaceConfig {
+        match *self {
+            MaybePackage::Package(ref p) => p.manifest().workspace_config(),
+            MaybePackage::Virtual(ref vm) => vm.workspace_config(),
+        }
+    }
+}
+
+impl WorkspaceRootConfig {
+    /// Creates a new Intermediate Workspace Root configuration.
+    pub fn new(
+        root_dir: &Path,
+        members: &Option<Vec<String>>,
+        default_members: &Option<Vec<String>>,
+        exclude: &Option<Vec<String>>,
+    ) -> WorkspaceRootConfig {
+        WorkspaceRootConfig {
+            root_dir: root_dir.to_path_buf(),
+            members: members.clone(),
+            default_members: default_members.clone(),
+            exclude: exclude.clone().unwrap_or_default(),
+        }
+    }
+
+    /// Checks the path against the `excluded` list.
+    ///
+    /// This method does **not** consider the `members` list.
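+    //
+    // Illustrative example (hypothetical paths, not from this source): with
+    // `root_dir = "/ws"` and `exclude = ["crates/vendored"]`, any manifest
+    // under `/ws/crates/vendored/` matches the exclusion, though the body
+    // below also lets an explicit `members` entry override the exclude list.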
+    fn is_excluded(&self, manifest_path: &Path) -> bool {
+        let excluded = self
+            .exclude
+            .iter()
+            .any(|ex| manifest_path.starts_with(self.root_dir.join(ex)));
+
+        let explicit_member = match self.members {
+            Some(ref members) => members
+                .iter()
+                .any(|mem| manifest_path.starts_with(self.root_dir.join(mem))),
+            None => false,
+        };
+
+        !explicit_member && excluded
+    }
+
+    fn has_members_list(&self) -> bool {
+        self.members.is_some()
+    }
+
+    fn members_paths(&self, globs: &[String]) -> CargoResult<Vec<PathBuf>> {
+        let mut expanded_list = Vec::new();
+
+        for glob in globs {
+            let pathbuf = self.root_dir.join(glob);
+            let expanded_paths = Self::expand_member_path(&pathbuf)?;
+
+            // If glob does not find any valid paths, then put the original
+            // path in the expanded list to maintain backwards compatibility.
+            if expanded_paths.is_empty() {
+                expanded_list.push(pathbuf);
+            } else {
+                expanded_list.extend(expanded_paths);
+            }
+        }
+
+        Ok(expanded_list)
+    }
+
+    fn expand_member_path(path: &Path) -> CargoResult<Vec<PathBuf>> {
+        let path = match path.to_str() {
+            Some(p) => p,
+            None => return Ok(Vec::new()),
+        };
+        let res =
+            glob(path).chain_err(|| failure::format_err!("could not parse pattern `{}`", &path))?;
+        let res = res
+            .map(|p| {
+                p.chain_err(|| failure::format_err!("unable to match path to pattern `{}`", &path))
+            })
+            .collect::<Result<Vec<_>, _>>()?;
+        Ok(res)
+    }
+}
diff --git a/src/cargo/lib.rs b/src/cargo/lib.rs
new file mode 100644
index 000000000..db193897b
--- /dev/null
+++ b/src/cargo/lib.rs
@@ -0,0 +1,216 @@
+#![cfg_attr(test, deny(warnings))]
+#![warn(rust_2018_idioms)]
+// While we're getting used to 2018:
+// Clippy isn't enforced by CI (@alexcrichton isn't a fan).
+#![allow(clippy::boxed_local)] // bug rust-lang-nursery/rust-clippy#1123
+#![allow(clippy::cyclomatic_complexity)] // large project
+#![allow(clippy::derive_hash_xor_eq)] // there's an intentional incoherence
+#![allow(clippy::explicit_into_iter_loop)] // explicit loops are clearer
+#![allow(clippy::explicit_iter_loop)] // explicit loops are clearer
+#![allow(clippy::identity_op)] // used for vertical alignment
+#![allow(clippy::implicit_hasher)] // large project
+#![allow(clippy::large_enum_variant)] // large project
+#![allow(clippy::redundant_closure_call)] // closures over try catch blocks
+#![allow(clippy::too_many_arguments)] // large project
+#![allow(clippy::type_complexity)] // there's an exceptionally complex type
+#![allow(clippy::wrong_self_convention)] // perhaps `Rc` should be special-cased in Clippy?
+
+use std::fmt;
+
+use failure::Error;
+use log::debug;
+use serde::ser;
+
+use crate::core::shell::Verbosity::Verbose;
+use crate::core::Shell;
+
+pub use crate::util::errors::Internal;
+pub use crate::util::{CargoResult, CliError, CliResult, Config};
+
+pub const CARGO_ENV: &str = "CARGO";
+
+#[macro_use]
+mod macros;
+
+pub mod core;
+pub mod ops;
+pub mod sources;
+pub mod util;
+
+pub struct CommitInfo {
+    pub short_commit_hash: String,
+    pub commit_hash: String,
+    pub commit_date: String,
+}
+
+pub struct CfgInfo {
+    // Information about the Git repository we may have been built from.
+    pub commit_info: Option<CommitInfo>,
+    // The release channel we were built for.
+    pub release_channel: String,
+}
+
+pub struct VersionInfo {
+    pub major: u8,
+    pub minor: u8,
+    pub patch: u8,
+    pub pre_release: Option<String>,
+    // Information that's only available when we were built with
+    // configure/make, rather than Cargo itself.
+    pub cfg_info: Option<CfgInfo>,
+}
+
+impl fmt::Display for VersionInfo {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "cargo {}.{}.{}", self.major, self.minor, self.patch)?;
+        if let Some(channel) = self.cfg_info.as_ref().map(|ci| &ci.release_channel) {
+            if channel != "stable" {
+                write!(f, "-{}", channel)?;
+                let empty = String::new();
+                write!(f, "{}", self.pre_release.as_ref().unwrap_or(&empty))?;
+            }
+        };
+
+        if let Some(ref cfg) = self.cfg_info {
+            if let Some(ref ci) = cfg.commit_info {
+                write!(f, " ({} {})", ci.short_commit_hash, ci.commit_date)?;
+            }
+        };
+        Ok(())
+    }
+}
+
+pub fn print_json<T: ser::Serialize>(obj: &T) {
+    let encoded = serde_json::to_string(&obj).unwrap();
+    println!("{}", encoded);
+}
+
+pub fn exit_with_error(err: CliError, shell: &mut Shell) -> ! {
+    debug!("exit_with_error; err={:?}", err);
+    if let Some(ref err) = err.error {
+        if let Some(clap_err) = err.downcast_ref::<clap::Error>() {
+            clap_err.exit()
+        }
+    }
+
+    let CliError {
+        error,
+        exit_code,
+        unknown,
+    } = err;
+    // `exit_code` of 0 means non-fatal error (e.g., docopt version info).
+    let fatal = exit_code != 0;
+
+    let hide = unknown && shell.verbosity() != Verbose;
+
+    if let Some(error) = error {
+        if hide {
+            drop(shell.error("An unknown error occurred"))
+        } else if fatal {
+            drop(shell.error(&error))
+        } else {
+            println!("{}", error);
+        }
+
+        if !handle_cause(&error, shell) || hide {
+            drop(writeln!(
+                shell.err(),
+                "\nTo learn more, run the command again \
+                 with --verbose."
+            ));
+        }
+    }
+
+    std::process::exit(exit_code)
+}
+
+pub fn handle_error(err: &failure::Error, shell: &mut Shell) {
+    debug!("handle_error; err={:?}", err);
+
+    let _ignored_result = shell.error(err);
+    handle_cause(err, shell);
+}
+
+fn handle_cause(cargo_err: &Error, shell: &mut Shell) -> bool {
+    fn print(error: &str, shell: &mut Shell) {
+        drop(writeln!(shell.err(), "\nCaused by:"));
+        drop(writeln!(shell.err(), " {}", error));
+    }
+
+    let verbose = shell.verbosity();
+
+    if verbose == Verbose {
+        // The first error has already been printed to the shell.
+        // Print all remaining errors.
+        for err in cargo_err.iter_causes() {
+            print(&err.to_string(), shell);
+        }
+    } else {
+        // The first error has already been printed to the shell.
+        // Print remaining errors until one marked as `Internal` appears.
+        for err in cargo_err.iter_causes() {
+            if err.downcast_ref::<Internal>().is_some() {
+                return false;
+            }
+
+            print(&err.to_string(), shell);
+        }
+    }
+
+    true
+}
+
+pub fn version() -> VersionInfo {
+    macro_rules! option_env_str {
+        ($name:expr) => {
+            option_env!($name).map(|s| s.to_string())
+        };
+    }
+
+    // So this is pretty horrible...
+    // There are two versions at play here:
+    // - version of cargo-the-binary, which you see when you type `cargo --version`
+    // - version of cargo-the-library, which you download from crates.io for use
+    //   in your packages.
+    //
+    // We want to make the `binary` version the same as the corresponding Rust/rustc release.
+    // At the same time, we want to keep the library version at `0.x`, because Cargo as
+    // a library is (and probably will always be) unstable.
+    //
+    // Historically, Cargo used the same version number for both the binary and the library.
+    // Specifically, rustc 1.x.z was paired with cargo 0.x+1.w.
+    // We continue to use this scheme for the library, but transform it to 1.x.w for the purposes
+    // of `cargo --version`.
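+    //
+    // Worked example (illustrative, not from the original comments): a
+    // cargo-the-library release of 0.35.0 reports itself below as
+    // `cargo 1.34.z`, since `minor` is the library minor version minus one.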
+    let major = 1;
+    let minor = env!("CARGO_PKG_VERSION_MINOR").parse::<u8>().unwrap() - 1;
+    let patch = env!("CARGO_PKG_VERSION_PATCH").parse::<u8>().unwrap();
+
+    match option_env!("CFG_RELEASE_CHANNEL") {
+        // We have environment variables set up from configure/make.
+        Some(_) => {
+            let commit_info = option_env!("CFG_COMMIT_HASH").map(|s| CommitInfo {
+                commit_hash: s.to_string(),
+                short_commit_hash: option_env_str!("CFG_SHORT_COMMIT_HASH").unwrap(),
+                commit_date: option_env_str!("CFG_COMMIT_DATE").unwrap(),
+            });
+            VersionInfo {
+                major,
+                minor,
+                patch,
+                pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"),
+                cfg_info: Some(CfgInfo {
+                    release_channel: option_env_str!("CFG_RELEASE_CHANNEL").unwrap(),
+                    commit_info,
+                }),
+            }
+        }
+        // We are being compiled by Cargo itself.
+        None => VersionInfo {
+            major,
+            minor,
+            patch,
+            pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"),
+            cfg_info: None,
+        },
+    }
+}
diff --git a/src/cargo/macros.rs b/src/cargo/macros.rs
new file mode 100644
index 000000000..3ebf3b37f
--- /dev/null
+++ b/src/cargo/macros.rs
@@ -0,0 +1,49 @@
+use std::fmt;
+
+macro_rules! compact_debug {
+    (
+        impl fmt::Debug for $ty:ident {
+            fn fmt(&$this:ident, f: &mut fmt::Formatter) -> fmt::Result {
+                let (default, default_name) = $e:expr;
+                [debug_the_fields($($field:ident)*)]
+            }
+        }
+    ) => (
+
+        impl fmt::Debug for $ty {
+            fn fmt(&$this, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                // Try printing a pretty version where we collapse as many fields as
+                // possible, indicating that they're equivalent to a function call
+                // that's hopefully enough to indicate what each value is without
+                // actually dumping everything so verbosely.
+                let mut s = f.debug_struct(stringify!($ty));
+                let (default, default_name) = $e;
+                let mut any_default = false;
+
+                // Exhaustively match so when fields are added we get a compile
+                // failure
+                let $ty { $($field),* } = $this;
+                $(
+                    if *$field == default.$field {
+                        any_default = true;
+                    } else {
+                        s.field(stringify!($field), $field);
+                    }
+                )*
+
+                if any_default {
+                    s.field("..", &crate::macros::DisplayAsDebug(default_name));
+                }
+                s.finish()
+            }
+        }
+    )
+}
+
+pub struct DisplayAsDebug<T>(pub T);
+
+impl<T: fmt::Display> fmt::Debug for DisplayAsDebug<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.0, f)
+    }
+}
diff --git a/src/cargo/ops/cargo_clean.rs b/src/cargo/ops/cargo_clean.rs
new file mode 100644
index 000000000..8dae82603
--- /dev/null
+++ b/src/cargo/ops/cargo_clean.rs
@@ -0,0 +1,147 @@
+use std::collections::HashMap;
+use std::fs;
+use std::path::Path;
+
+use crate::core::compiler::{BuildConfig, BuildContext, CompileMode, Context, Kind, Unit};
+use crate::core::profiles::UnitFor;
+use crate::core::Workspace;
+use crate::ops;
+use crate::util::errors::{CargoResult, CargoResultExt};
+use crate::util::paths;
+use crate::util::Config;
+
+pub struct CleanOptions<'a> {
+    pub config: &'a Config,
+    /// A list of packages to clean. If empty, everything is cleaned.
+    pub spec: Vec<String>,
+    /// The target arch triple to clean, or None for the host arch
+    pub target: Option<String>,
+    /// Whether to clean the release directory
+    pub release: bool,
+    /// Whether to just clean the doc directory
+    pub doc: bool,
+}
+
+/// Cleans the package's build artifacts.
+pub fn clean(ws: &Workspace<'_>, opts: &CleanOptions<'_>) -> CargoResult<()> {
+    let mut target_dir = ws.target_dir();
+    let config = ws.config();
+
+    // If the doc option is set, we just want to delete the doc directory.
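+    // (Illustrative: `cargo clean --doc` removes only `target/doc` and leaves
+    // all other build artifacts in place.)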
+ if opts.doc { + target_dir = target_dir.join("doc"); + return rm_rf(&target_dir.into_path_unlocked(), config); + } + + // If the release option is set, we set target to release directory + if opts.release { + target_dir = target_dir.join("release"); + } + + // If we have a spec, then we need to delete some packages, otherwise, just + // remove the whole target directory and be done with it! + // + // Note that we don't bother grabbing a lock here as we're just going to + // blow it all away anyway. + if opts.spec.is_empty() { + return rm_rf(&target_dir.into_path_unlocked(), config); + } + + let (packages, resolve) = ops::resolve_ws(ws)?; + + let profiles = ws.profiles(); + let mut units = Vec::new(); + + for spec in opts.spec.iter() { + // Translate the spec to a Package + let pkgid = resolve.query(spec)?; + let pkg = packages.get_one(pkgid)?; + + // Generate all relevant `Unit` targets for this package + for target in pkg.targets() { + for kind in [Kind::Host, Kind::Target].iter() { + for mode in CompileMode::all_modes() { + for unit_for in UnitFor::all_values() { + let profile = if mode.is_run_custom_build() { + profiles.get_profile_run_custom_build(&profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + *unit_for, + CompileMode::Build, + opts.release, + )) + } else { + profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + *unit_for, + *mode, + opts.release, + ) + }; + units.push(Unit { + pkg, + target, + profile, + kind: *kind, + mode: *mode, + }); + } + } + } + } + } + + let mut build_config = BuildConfig::new(config, Some(1), &opts.target, CompileMode::Build)?; + build_config.release = opts.release; + let bcx = BuildContext::new( + ws, + &resolve, + &packages, + opts.config, + &build_config, + profiles, + HashMap::new(), + )?; + let mut cx = Context::new(config, &bcx)?; + cx.prepare_units(None, &units)?; + + for unit in units.iter() { + rm_rf(&cx.files().fingerprint_dir(unit), config)?; + if unit.target.is_custom_build() { + if unit.mode.is_run_custom_build() { + rm_rf(&cx.files().build_script_out_dir(unit), config)?; + } else { + rm_rf(&cx.files().build_script_dir(unit), config)?; + } + continue; + } + + for output in cx.outputs(unit)?.iter() { + rm_rf(&output.path, config)?; + if let Some(ref dst) = output.hardlink { + rm_rf(dst, config)?; + } + } + } + + Ok(()) +} + +fn rm_rf(path: &Path, config: &Config) -> CargoResult<()> { + let m = fs::metadata(path); + if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) { + config + .shell() + .verbose(|shell| shell.status("Removing", path.display()))?; + paths::remove_dir_all(path) + .chain_err(|| failure::format_err!("could not remove build directory"))?; + } else if m.is_ok() { + config + .shell() + .verbose(|shell| shell.status("Removing", path.display()))?; + paths::remove_file(path) + .chain_err(|| failure::format_err!("failed to remove build artifact"))?; + } + Ok(()) +} diff --git a/src/cargo/ops/cargo_compile.rs b/src/cargo/ops/cargo_compile.rs new file mode 100644 index 000000000..169fcaab9 --- /dev/null +++ b/src/cargo/ops/cargo_compile.rs @@ -0,0 +1,880 @@ +//! Cargo `compile` currently does the following steps. +//! +//! All configurations are already injected as environment variables via the +//! main cargo command. +//! +//! 1. Read the manifest. +//! 2. Shell out to `cargo-resolve` with a list of dependencies and sources as +//! stdin. +//! +//! a. Shell out to `--do update` and `--do list` for each source. +//! b. Resolve dependencies and return a list of name/version/source. +//! +//! 3. 
+//!    Shell out to `--do download` for each source.
+//! 4. Shell out to `--do get` for each source, and build up the list of paths
+//!    to pass to `rustc -L`.
+//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
+//!    the results of the `get`.
+//!
+//!    a. Topologically sort the dependencies.
+//!    b. Compile each dependency in order, passing in the -L's pointing at each
+//!       previously compiled dependency.
+
+use std::collections::{BTreeSet, HashMap, HashSet};
+use std::iter::FromIterator;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use crate::core::compiler::{
+    BuildConfig, BuildContext, Compilation, Context, DefaultExecutor, Executor,
+};
+use crate::core::compiler::{CompileMode, Kind, Unit};
+use crate::core::profiles::{Profiles, UnitFor};
+use crate::core::resolver::{Method, Resolve};
+use crate::core::{Package, Source, Target};
+use crate::core::{PackageId, PackageIdSpec, TargetKind, Workspace};
+use crate::ops;
+use crate::util::config::Config;
+use crate::util::{lev_distance, profile, CargoResult};
+
+/// Contains information about how a package should be compiled.
+#[derive(Debug)]
+pub struct CompileOptions<'a> {
+    pub config: &'a Config,
+    /// Configuration information for a rustc build
+    pub build_config: BuildConfig,
+    /// Extra features to build for the root package
+    pub features: Vec<String>,
+    /// Flag whether all available features should be built for the root package
+    pub all_features: bool,
+    /// Flag if the default feature should be built for the root package
+    pub no_default_features: bool,
+    /// A set of packages to build.
+    pub spec: Packages,
+    /// Filter to apply to the root package to select which targets will be
+    /// built.
+    pub filter: CompileFilter,
+    /// Extra arguments to be passed to rustdoc (single target only)
+    pub target_rustdoc_args: Option<Vec<String>>,
+    /// The specified target will be compiled with all the available arguments,
+    /// note that this only accounts for the *final* invocation of rustc
+    pub target_rustc_args: Option<Vec<String>>,
+    /// Extra arguments passed to all selected targets for rustdoc.
+    pub local_rustdoc_args: Option<Vec<String>>,
+    /// The directory to copy final artifacts to. Note that even if `out_dir` is
+    /// set, a copy of artifacts still could be found at `target/(debug\release)`
+    /// as usual.
+    // Note that, although the cmd-line flag name is `out-dir`, in code we use
+    // `export_dir`, to avoid confusion with out dir at `target/debug/deps`.
+    pub export_dir: Option<PathBuf>,
+}
+
+impl<'a> CompileOptions<'a> {
+    pub fn new(config: &'a Config, mode: CompileMode) -> CargoResult<CompileOptions<'a>> {
+        Ok(CompileOptions {
+            config,
+            build_config: BuildConfig::new(config, None, &None, mode)?,
+            features: Vec::new(),
+            all_features: false,
+            no_default_features: false,
+            spec: ops::Packages::Packages(Vec::new()),
+            filter: CompileFilter::Default {
+                required_features_filterable: false,
+            },
+            target_rustdoc_args: None,
+            target_rustc_args: None,
+            local_rustdoc_args: None,
+            export_dir: None,
+        })
+    }
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum Packages {
+    Default,
+    All,
+    OptOut(Vec<String>),
+    Packages(Vec<String>),
+}
+
+impl Packages {
+    pub fn from_flags(all: bool, exclude: Vec<String>, package: Vec<String>) -> CargoResult<Packages> {
+        Ok(match (all, exclude.len(), package.len()) {
+            (false, 0, 0) => Packages::Default,
+            (false, 0, _) => Packages::Packages(package),
+            (false, _, _) => failure::bail!("--exclude can only be used together with --all"),
+            (true, 0, _) => Packages::All,
+            (true, _, _) => Packages::OptOut(exclude),
+        })
+    }
+
+    pub fn to_package_id_specs(&self, ws: &Workspace<'_>) -> CargoResult<Vec<PackageIdSpec>> {
+        let specs = match self {
+            Packages::All => ws
+                .members()
+                .map(Package::package_id)
+                .map(PackageIdSpec::from_package_id)
+                .collect(),
+            Packages::OptOut(opt_out) => {
+                let mut opt_out = BTreeSet::from_iter(opt_out.iter().cloned());
+                let packages = ws
+                    .members()
+                    .filter(|pkg| !opt_out.remove(pkg.name().as_str()))
+                    .map(Package::package_id)
+                    .map(PackageIdSpec::from_package_id)
+                    .collect();
+                if !opt_out.is_empty() {
+                    ws.config().shell().warn(format!(
+                        "excluded package(s) {} not found in workspace `{}`",
+                        opt_out.iter().map(|x| x.as_ref()).collect::<Vec<&str>>().join(", "),
+                        ws.root().display(),
+                    ))?;
+                }
+                packages
+            },
+            Packages::Packages(packages) if packages.is_empty() => {
+                vec![PackageIdSpec::from_package_id(ws.current()?.package_id())]
+            }
+            Packages::Packages(packages) => packages
+                .iter()
+                .map(|p| PackageIdSpec::parse(p))
+                .collect::<CargoResult<Vec<_>>>()?,
+            Packages::Default => ws
+                .default_members()
+                .map(Package::package_id)
+                .map(PackageIdSpec::from_package_id)
+                .collect(),
+        };
+        if specs.is_empty() {
+            if ws.is_virtual() {
+                failure::bail!(
+                    "manifest path `{}` contains no package: The manifest is virtual, \
+                     and the workspace has no members.",
+                    ws.root().display()
+                )
+            }
+            failure::bail!("no packages to compile")
+        }
+        Ok(specs)
+    }
+
+    pub fn get_packages<'ws>(&self, ws: &'ws Workspace<'_>) -> CargoResult<Vec<&'ws Package>> {
+        let packages: Vec<_> = match self {
+            Packages::Default => ws.default_members().collect(),
+            Packages::All => ws.members().collect(),
+            Packages::OptOut(opt_out) => ws
+                .members()
+                .filter(|pkg| !opt_out.iter().any(|name| pkg.name().as_str() == name))
+                .collect(),
+            Packages::Packages(packages) => packages
+                .iter()
+                .map(|name| {
+                    ws.members()
+                        .find(|pkg| pkg.name().as_str() == name)
+                        .ok_or_else(|| {
+                            failure::format_err!(
+                                "package `{}` is not a member of the workspace",
+                                name
+                            )
+                        })
+                })
+                .collect::<CargoResult<Vec<_>>>()?,
+        };
+        Ok(packages)
+    }
+}
+
+#[derive(Debug)]
+pub enum FilterRule {
+    All,
+    Just(Vec<String>),
+}
+
+#[derive(Debug)]
+pub enum CompileFilter {
+    Default {
+        /// Flag whether targets can be safely skipped when required-features are not satisfied.
+        required_features_filterable: bool,
+    },
+    Only {
+        all_targets: bool,
+        lib: bool,
+        bins: FilterRule,
+        examples: FilterRule,
+        tests: FilterRule,
+        benches: FilterRule,
+    },
+}
+
+pub fn compile<'a>(
+    ws: &Workspace<'a>,
+    options: &CompileOptions<'a>,
+) -> CargoResult<Compilation<'a>> {
+    let exec: Arc<dyn Executor> = Arc::new(DefaultExecutor);
+    compile_with_exec(ws, options, &exec)
+}
+
+/// Like `compile` but allows specifying a custom `Executor` that will be able to intercept build
+/// calls and add custom logic. `compile` uses `DefaultExecutor` which just passes calls through.
+pub fn compile_with_exec<'a>(
+    ws: &Workspace<'a>,
+    options: &CompileOptions<'a>,
+    exec: &Arc<dyn Executor>,
+) -> CargoResult<Compilation<'a>> {
+    ws.emit_warnings()?;
+    compile_ws(ws, None, options, exec)
+}
+
+pub fn compile_ws<'a>(
+    ws: &Workspace<'a>,
+    source: Option<Box<dyn Source + 'a>>,
+    options: &CompileOptions<'a>,
+    exec: &Arc<dyn Executor>,
+) -> CargoResult<Compilation<'a>> {
+    let CompileOptions {
+        config,
+        ref build_config,
+        ref spec,
+        ref features,
+        all_features,
+        no_default_features,
+        ref filter,
+        ref target_rustdoc_args,
+        ref target_rustc_args,
+        ref local_rustdoc_args,
+        ref export_dir,
+    } = *options;
+
+    let default_arch_kind = if build_config.requested_target.is_some() {
+        Kind::Target
+    } else {
+        Kind::Host
+    };
+
+    let specs = spec.to_package_id_specs(ws)?;
+    let features = Method::split_features(features);
+    let method = Method::Required {
+        dev_deps: ws.require_optional_deps() || filter.need_dev_deps(build_config.mode),
+        features: &features,
+        all_features,
+        uses_default_features: !no_default_features,
+    };
+    let resolve = ops::resolve_ws_with_method(ws, source, method, &specs)?;
+    let (packages, resolve_with_overrides) = resolve;
+
+    let to_build_ids = specs
+        .iter()
+        .map(|s| s.query(resolve_with_overrides.iter()))
+        .collect::<CargoResult<Vec<_>>>()?;
+    let mut to_builds = packages.get_many(to_build_ids)?;
+
+    // The ordering here affects some error messages coming out of cargo, so
+    // let's be test and CLI friendly by always printing in the same order if
+    // there's an error.
+ to_builds.sort_by_key(|p| p.package_id()); + + for pkg in to_builds.iter() { + pkg.manifest().print_teapot(ws.config()); + + if build_config.mode.is_any_test() + && !ws.is_member(pkg) + && pkg.dependencies().iter().any(|dep| !dep.is_transitive()) + { + failure::bail!( + "package `{}` cannot be tested because it requires dev-dependencies \ + and is not a member of the workspace", + pkg.name() + ); + } + } + + let (extra_args, extra_args_name) = match (target_rustc_args, target_rustdoc_args) { + (&Some(ref args), _) => (Some(args.clone()), "rustc"), + (_, &Some(ref args)) => (Some(args.clone()), "rustdoc"), + _ => (None, ""), + }; + + if extra_args.is_some() && to_builds.len() != 1 { + panic!( + "`{}` should not accept multiple `-p` flags", + extra_args_name + ); + } + + let profiles = ws.profiles(); + profiles.validate_packages(&mut config.shell(), &packages)?; + + let units = generate_targets( + ws, + profiles, + &to_builds, + filter, + default_arch_kind, + &resolve_with_overrides, + build_config, + )?; + + let mut extra_compiler_args = HashMap::new(); + if let Some(args) = extra_args { + if units.len() != 1 { + failure::bail!( + "extra arguments to `{}` can only be passed to one \ + target, consider filtering\nthe package by passing, \ + e.g., `--lib` or `--bin NAME` to specify a single target", + extra_args_name + ); + } + extra_compiler_args.insert(units[0], args); + } + if let Some(args) = local_rustdoc_args { + for unit in &units { + if unit.mode.is_doc() { + extra_compiler_args.insert(*unit, args.clone()); + } + } + } + + let ret = { + let _p = profile::start("compiling"); + let bcx = BuildContext::new( + ws, + &resolve_with_overrides, + &packages, + config, + &build_config, + profiles, + extra_compiler_args, + )?; + let cx = Context::new(config, &bcx)?; + cx.compile(&units, export_dir.clone(), &exec)? 
+    };
+
+    Ok(ret)
+}
+
+impl FilterRule {
+    pub fn new(targets: Vec<String>, all: bool) -> FilterRule {
+        if all {
+            FilterRule::All
+        } else {
+            FilterRule::Just(targets)
+        }
+    }
+
+    fn matches(&self, target: &Target) -> bool {
+        match *self {
+            FilterRule::All => true,
+            FilterRule::Just(ref targets) => targets.iter().any(|x| *x == target.name()),
+        }
+    }
+
+    fn is_specific(&self) -> bool {
+        match *self {
+            FilterRule::All => true,
+            FilterRule::Just(ref targets) => !targets.is_empty(),
+        }
+    }
+
+    pub fn try_collect(&self) -> Option<Vec<String>> {
+        match *self {
+            FilterRule::All => None,
+            FilterRule::Just(ref targets) => Some(targets.clone()),
+        }
+    }
+}
+
+impl CompileFilter {
+    pub fn new(
+        lib_only: bool,
+        bins: Vec<String>,
+        all_bins: bool,
+        tsts: Vec<String>,
+        all_tsts: bool,
+        exms: Vec<String>,
+        all_exms: bool,
+        bens: Vec<String>,
+        all_bens: bool,
+        all_targets: bool,
+    ) -> CompileFilter {
+        let rule_bins = FilterRule::new(bins, all_bins);
+        let rule_tsts = FilterRule::new(tsts, all_tsts);
+        let rule_exms = FilterRule::new(exms, all_exms);
+        let rule_bens = FilterRule::new(bens, all_bens);
+
+        if all_targets {
+            CompileFilter::Only {
+                all_targets: true,
+                lib: true,
+                bins: FilterRule::All,
+                examples: FilterRule::All,
+                benches: FilterRule::All,
+                tests: FilterRule::All,
+            }
+        } else if lib_only
+            || rule_bins.is_specific()
+            || rule_tsts.is_specific()
+            || rule_exms.is_specific()
+            || rule_bens.is_specific()
+        {
+            CompileFilter::Only {
+                all_targets: false,
+                lib: lib_only,
+                bins: rule_bins,
+                examples: rule_exms,
+                benches: rule_bens,
+                tests: rule_tsts,
+            }
+        } else {
+            CompileFilter::Default {
+                required_features_filterable: true,
+            }
+        }
+    }
+
+    pub fn need_dev_deps(&self, mode: CompileMode) -> bool {
+        match mode {
+            CompileMode::Test | CompileMode::Doctest | CompileMode::Bench => true,
+            CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { .. } => match *self
+            {
+                CompileFilter::Default { .. } => false,
+                CompileFilter::Only {
+                    ref examples,
+                    ref tests,
+                    ref benches,
+                    ..
+                } => examples.is_specific() || tests.is_specific() || benches.is_specific(),
+            },
+            CompileMode::RunCustomBuild => panic!("Invalid mode"),
+        }
+    }
+
+    // This selects targets for "cargo run". For logic to select targets for
+    // other subcommands, see `generate_targets` and `filter_default_targets`.
+    pub fn target_run(&self, target: &Target) -> bool {
+        match *self {
+            CompileFilter::Default { .. } => true,
+            CompileFilter::Only {
+                lib,
+                ref bins,
+                ref examples,
+                ref tests,
+                ref benches,
+                ..
+            } => {
+                let rule = match *target.kind() {
+                    TargetKind::Bin => bins,
+                    TargetKind::Test => tests,
+                    TargetKind::Bench => benches,
+                    TargetKind::ExampleBin | TargetKind::ExampleLib(..) => examples,
+                    TargetKind::Lib(..) => return lib,
+                    TargetKind::CustomBuild => return false,
+                };
+                rule.matches(target)
+            }
+        }
+    }
+
+    pub fn is_specific(&self) -> bool {
+        match *self {
+            CompileFilter::Default { .. } => false,
+            CompileFilter::Only { .. } => true,
+        }
+    }
+}
+
+/// A proposed target.
+///
+/// Proposed targets are later filtered into actual `Unit`s based on whether or
+/// not the target requires its features to be present.
+#[derive(Debug)]
+struct Proposal<'a> {
+    pkg: &'a Package,
+    target: &'a Target,
+    /// Indicates whether or not all required features *must* be present. If
+    /// false, and the features are not available, then it will be silently
+    /// skipped. Generally, targets specified by name (`--bin foo`) are
+    /// required, all others can be silently skipped if features are missing.
+    requires_features: bool,
+    mode: CompileMode,
+}
+
+/// Generates all the base targets for the packages the user has requested to
+/// compile. Dependencies for these targets are computed later in `unit_dependencies`.
+fn generate_targets<'a>(
+    ws: &Workspace<'_>,
+    profiles: &Profiles,
+    packages: &[&'a Package],
+    filter: &CompileFilter,
+    default_arch_kind: Kind,
+    resolve: &Resolve,
+    build_config: &BuildConfig,
+) -> CargoResult<Vec<Unit<'a>>> {
+    // Helper for creating a `Unit` struct.
+    let new_unit = |pkg: &'a Package, target: &'a Target, target_mode: CompileMode| {
+        let unit_for = if build_config.mode.is_any_test() {
+            // NOTE: the `UnitFor` here is subtle. If you have a profile
+            // with `panic` set, the `panic` flag is cleared for
+            // tests/benchmarks and their dependencies. If this
+            // was `normal`, then the lib would get compiled three
+            // times (once with panic, once without, and once with
+            // `--test`).
+            //
+            // This would cause a problem for doc tests, which would fail
+            // because `rustdoc` would attempt to link with both libraries
+            // at the same time. Also, it's probably not important (or
+            // even desirable?) for rustdoc to link with a lib with
+            // `panic` set.
+            //
+            // As a consequence, Examples and Binaries get compiled
+            // without `panic` set. This probably isn't a bad deal.
+            //
+            // Forcing the lib to be compiled three times during `cargo
+            // test` is probably also not desirable.
+            UnitFor::new_test()
+        } else if target.for_host() {
+            // Proc macro / plugin should not have `panic` set.
+            UnitFor::new_compiler()
+        } else {
+            UnitFor::new_normal()
+        };
+        // Custom build units are added in `build_unit_dependencies`.
+        assert!(!target.is_custom_build());
+        let target_mode = match target_mode {
+            CompileMode::Test => {
+                if target.is_example() && !filter.is_specific() && !target.tested() {
+                    // Examples are included as regular binaries to verify
+                    // that they compile.
+                    CompileMode::Build
+                } else {
+                    CompileMode::Test
+                }
+            }
+            CompileMode::Build => match *target.kind() {
+                TargetKind::Test => CompileMode::Test,
+                TargetKind::Bench => CompileMode::Bench,
+                _ => CompileMode::Build,
+            },
+            // `CompileMode::Bench` is only used to inform `filter_default_targets`
+            // which command is being used (`cargo bench`). Afterwards, tests
+            // and benches are treated identically. Switching the mode allows
+            // de-duplication of units that are essentially identical. For
+            // example, `cargo build --all-targets --release` creates the units
+            // (lib profile:bench, mode:test) and (lib profile:bench, mode:bench)
+            // and since these are the same, we want them to be de-duplicated in
+            // `unit_dependencies`.
+            CompileMode::Bench => CompileMode::Test,
+            _ => target_mode,
+        };
+        // Plugins or proc macros should be built for the host.
+        let kind = if target.for_host() {
+            Kind::Host
+        } else {
+            default_arch_kind
+        };
+        let profile = profiles.get_profile(
+            pkg.package_id(),
+            ws.is_member(pkg),
+            unit_for,
+            target_mode,
+            build_config.release,
+        );
+        Unit {
+            pkg,
+            target,
+            profile,
+            kind,
+            mode: target_mode,
+        }
+    };
+
+    // Create a list of proposed targets.
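+    // Illustrative example (not from the original comments): plain `cargo
+    // test` proposes every tested target, examples (built only to verify they
+    // compile), and one extra `Doctest` proposal for a doc-tested lib.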
+    let mut proposals: Vec<Proposal<'a>> = Vec::new();
+
+    match *filter {
+        CompileFilter::Default {
+            required_features_filterable,
+        } => {
+            for pkg in packages {
+                let default = filter_default_targets(pkg.targets(), build_config.mode);
+                proposals.extend(default.into_iter().map(|target| Proposal {
+                    pkg,
+                    target,
+                    requires_features: !required_features_filterable,
+                    mode: build_config.mode,
+                }));
+                if build_config.mode == CompileMode::Test {
+                    if let Some(t) = pkg
+                        .targets()
+                        .iter()
+                        .find(|t| t.is_lib() && t.doctested() && t.doctestable())
+                    {
+                        proposals.push(Proposal {
+                            pkg,
+                            target: t,
+                            requires_features: false,
+                            mode: CompileMode::Doctest,
+                        });
+                    }
+                }
+            }
+        }
+        CompileFilter::Only {
+            all_targets,
+            lib,
+            ref bins,
+            ref examples,
+            ref tests,
+            ref benches,
+        } => {
+            if lib {
+                let mut libs = Vec::new();
+                for proposal in filter_targets(packages, Target::is_lib, false, build_config.mode) {
+                    let Proposal { target, pkg, .. } = proposal;
+                    if build_config.mode == CompileMode::Doctest && !target.doctestable() {
+                        ws.config().shell().warn(format!(
+                            "doc tests are not supported for crate type(s) `{}` in package `{}`",
+                            target.rustc_crate_types().join(", "),
+                            pkg.name()
+                        ))?;
+                    } else {
+                        libs.push(proposal)
+                    }
+                }
+                if !all_targets && libs.is_empty() {
+                    let names = packages.iter().map(|pkg| pkg.name()).collect::<Vec<_>>();
+                    if names.len() == 1 {
+                        failure::bail!("no library targets found in package `{}`", names[0]);
+                    } else {
+                        failure::bail!(
+                            "no library targets found in packages: {}",
+                            names.join(", ")
+                        );
+                    }
+                }
+                proposals.extend(libs);
+            }
+
+            // If `--tests` was specified, add all targets that would be
+            // generated by `cargo test`.
+            let test_filter = match tests {
+                FilterRule::All => Target::tested,
+                FilterRule::Just(_) => Target::is_test,
+            };
+            let test_mode = match build_config.mode {
+                CompileMode::Build => CompileMode::Test,
+                CompileMode::Check { .. } => CompileMode::Check { test: true },
+                _ => build_config.mode,
+            };
+            // If `--benches` was specified, add all targets that would be
+            // generated by `cargo bench`.
+            let bench_filter = match benches {
+                FilterRule::All => Target::benched,
+                FilterRule::Just(_) => Target::is_bench,
+            };
+            let bench_mode = match build_config.mode {
+                CompileMode::Build => CompileMode::Bench,
+                CompileMode::Check { .. } => CompileMode::Check { test: true },
+                _ => build_config.mode,
+            };
+
+            proposals.extend(list_rule_targets(
+                packages,
+                bins,
+                "bin",
+                Target::is_bin,
+                build_config.mode,
+            )?);
+            proposals.extend(list_rule_targets(
+                packages,
+                examples,
+                "example",
+                Target::is_example,
+                build_config.mode,
+            )?);
+            proposals.extend(list_rule_targets(
+                packages,
+                tests,
+                "test",
+                test_filter,
+                test_mode,
+            )?);
+            proposals.extend(list_rule_targets(
+                packages,
+                benches,
+                "bench",
+                bench_filter,
+                bench_mode,
+            )?);
+        }
+    }
+
+    // Only include targets that are libraries or have all required
+    // features available.
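+    //
+    // Illustrative example (hypothetical manifest, not from this source): a
+    // `[[bin]]` with `required-features = ["cli"]` is silently dropped here
+    // when the `cli` feature is off, unless the target was requested by name
+    // (e.g. `--bin mybin`), in which case the loop bails with an error that
+    // suggests `--features="cli"`.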
+    let mut features_map = HashMap::new();
+    let mut units = HashSet::new();
+    for Proposal {
+        pkg,
+        target,
+        requires_features,
+        mode,
+    } in proposals
+    {
+        let unavailable_features = match target.required_features() {
+            Some(rf) => {
+                let features = features_map
+                    .entry(pkg)
+                    .or_insert_with(|| resolve_all_features(resolve, pkg.package_id()));
+                rf.iter().filter(|f| !features.contains(*f)).collect()
+            }
+            None => Vec::new(),
+        };
+        if target.is_lib() || unavailable_features.is_empty() {
+            let unit = new_unit(pkg, target, mode);
+            units.insert(unit);
+        } else if requires_features {
+            let required_features = target.required_features().unwrap();
+            let quoted_required_features: Vec<String> = required_features
+                .iter()
+                .map(|s| format!("`{}`", s))
+                .collect();
+            failure::bail!(
+                "target `{}` in package `{}` requires the features: {}\n\
+                 Consider enabling them by passing, e.g., `--features=\"{}\"`",
+                target.name(),
+                pkg.name(),
+                quoted_required_features.join(", "),
+                required_features.join(" ")
+            );
+        }
+        // else, silently skip target.
+    }
+    Ok(units.into_iter().collect())
+}
+
+fn resolve_all_features(
+    resolve_with_overrides: &Resolve,
+    package_id: PackageId,
+) -> HashSet<String> {
+    let mut features = resolve_with_overrides.features(package_id).clone();
+
+    // Include features enabled for use by dependencies so targets can also use them with the
+    // required-features field when deciding whether to be built or skipped.
+    for (dep, _) in resolve_with_overrides.deps(package_id) {
+        for feature in resolve_with_overrides.features(dep) {
+            features.insert(dep.name().to_string() + "/" + feature);
+        }
+    }
+
+    features
+}
+
+/// Given a list of all targets for a package, filters out only the targets
+/// that are automatically included when the user doesn't specify any targets.
+fn filter_default_targets(targets: &[Target], mode: CompileMode) -> Vec<&Target> {
+    match mode {
+        CompileMode::Bench => targets.iter().filter(|t| t.benched()).collect(),
+        CompileMode::Test => targets
+            .iter()
+            .filter(|t| t.tested() || t.is_example())
+            .collect(),
+        CompileMode::Build | CompileMode::Check { .. } => targets
+            .iter()
+            .filter(|t| t.is_bin() || t.is_lib())
+            .collect(),
+        CompileMode::Doc { .. } => {
+            // `doc` does lib and bins (bin with same name as lib is skipped).
+            targets
+                .iter()
+                .filter(|t| {
+                    t.documented()
+                        && (!t.is_bin()
+                            || !targets.iter().any(|l| l.is_lib() && l.name() == t.name()))
+                })
+                .collect()
+        }
+        CompileMode::Doctest | CompileMode::RunCustomBuild => panic!("Invalid mode {:?}", mode),
+    }
+}
+
+/// Returns a list of proposed targets based on command-line target selection flags.
+fn list_rule_targets<'a>(
+    packages: &[&'a Package],
+    rule: &FilterRule,
+    target_desc: &'static str,
+    is_expected_kind: fn(&Target) -> bool,
+    mode: CompileMode,
+) -> CargoResult<Vec<Proposal<'a>>> {
+    let mut proposals = Vec::new();
+    match rule {
+        FilterRule::All => {
+            proposals.extend(filter_targets(packages, is_expected_kind, false, mode))
+        }
+        FilterRule::Just(names) => {
+            for name in names {
+                proposals.extend(find_named_targets(
+                    packages,
+                    name,
+                    target_desc,
+                    is_expected_kind,
+                    mode,
+                )?);
+            }
+        }
+    }
+    Ok(proposals)
+}
+
+/// Finds the targets for a specifically named target.
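+//
+// Illustrative behavior (hypothetical target names): `cargo build --bin serve`
+// with no `serve` target fails, and the Levenshtein-distance search below may
+// suggest a near miss such as `server` when the edit distance is under 4.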
+fn find_named_targets<'a>( + packages: &[&'a Package], + target_name: &str, + target_desc: &'static str, + is_expected_kind: fn(&Target) -> bool, + mode: CompileMode, +) -> CargoResult>> { + let filter = |t: &Target| t.name() == target_name && is_expected_kind(t); + let proposals = filter_targets(packages, filter, true, mode); + if proposals.is_empty() { + let suggestion = packages + .iter() + .flat_map(|pkg| { + pkg.targets() + .iter() + .filter(|target| is_expected_kind(target)) + }) + .map(|target| (lev_distance(target_name, target.name()), target)) + .filter(|&(d, _)| d < 4) + .min_by_key(|t| t.0) + .map(|t| t.1); + match suggestion { + Some(s) => failure::bail!( + "no {} target named `{}`\n\nDid you mean `{}`?", + target_desc, + target_name, + s.name() + ), + None => failure::bail!("no {} target named `{}`", target_desc, target_name), + } + } + Ok(proposals) +} + +fn filter_targets<'a>( + packages: &[&'a Package], + predicate: impl Fn(&Target) -> bool, + requires_features: bool, + mode: CompileMode, +) -> Vec> { + let mut proposals = Vec::new(); + for pkg in packages { + for target in pkg.targets().iter().filter(|t| predicate(t)) { + proposals.push(Proposal { + pkg, + target, + requires_features, + mode, + }); + } + } + proposals +} diff --git a/src/cargo/ops/cargo_doc.rs b/src/cargo/ops/cargo_doc.rs new file mode 100644 index 000000000..a61e9c25c --- /dev/null +++ b/src/cargo/ops/cargo_doc.rs @@ -0,0 +1,110 @@ +use std::collections::HashMap; +use std::fs; +use std::path::Path; + +use failure::Fail; +use opener; + +use crate::core::Workspace; +use crate::ops; +use crate::util::CargoResult; + +/// Strongly typed options for the `cargo doc` command. +#[derive(Debug)] +pub struct DocOptions<'a> { + /// Whether to attempt to open the browser after compiling the docs + pub open_result: bool, + /// Options to pass through to the compiler + pub compile_opts: ops::CompileOptions<'a>, +} + +/// Main method for `cargo doc`. +pub fn doc(ws: &Workspace<'_>, options: &DocOptions<'_>) -> CargoResult<()> { + let specs = options.compile_opts.spec.to_package_id_specs(ws)?; + let resolve = ops::resolve_ws_precisely( + ws, + None, + &options.compile_opts.features, + options.compile_opts.all_features, + options.compile_opts.no_default_features, + &specs, + )?; + let (packages, resolve_with_overrides) = resolve; + + let ids = specs + .iter() + .map(|s| s.query(resolve_with_overrides.iter())) + .collect::>>()?; + let pkgs = packages.get_many(ids)?; + + let mut lib_names = HashMap::new(); + let mut bin_names = HashMap::new(); + for package in &pkgs { + for target in package.targets().iter().filter(|t| t.documented()) { + if target.is_lib() { + if let Some(prev) = lib_names.insert(target.crate_name(), package) { + failure::bail!( + "The library `{}` is specified by packages `{}` and \ + `{}` but can only be documented once. Consider renaming \ + or marking one of the targets as `doc = false`.", + target.crate_name(), + prev, + package + ); + } + } else if let Some(prev) = bin_names.insert(target.crate_name(), package) { + failure::bail!( + "The binary `{}` is specified by packages `{}` and \ + `{}` but can be documented only once. 
Consider renaming \ + or marking one of the targets as `doc = false`.", + target.crate_name(), + prev, + package + ); + } + } + } + + ops::compile(ws, &options.compile_opts)?; + + if options.open_result { + let name = if pkgs.len() > 1 { + failure::bail!( + "Passing multiple packages and `open` is not supported.\n\ + Please re-run this command with `-p ` where `` \ + is one of the following:\n {}", + pkgs.iter() + .map(|p| p.name().as_str()) + .collect::>() + .join("\n ") + ); + } else { + match lib_names.keys().chain(bin_names.keys()).nth(0) { + Some(s) => s.to_string(), + None => return Ok(()), + } + }; + + // Don't bother locking here as if this is getting deleted there's + // nothing we can do about it and otherwise if it's getting overwritten + // then that's also ok! + let mut target_dir = ws.target_dir(); + if let Some(ref triple) = options.compile_opts.build_config.requested_target { + target_dir.push(Path::new(triple).file_stem().unwrap()); + } + let path = target_dir.join("doc").join(&name).join("index.html"); + let path = path.into_path_unlocked(); + if fs::metadata(&path).is_ok() { + let mut shell = options.compile_opts.config.shell(); + shell.status("Opening", path.display())?; + if let Err(e) = opener::open(&path) { + shell.warn(format!("Couldn't open docs: {}", e))?; + for cause in (&e as &dyn Fail).iter_chain() { + shell.warn(format!("Caused by:\n {}", cause))?; + } + } + } + } + + Ok(()) +} diff --git a/src/cargo/ops/cargo_fetch.rs b/src/cargo/ops/cargo_fetch.rs new file mode 100644 index 000000000..179b659f8 --- /dev/null +++ b/src/cargo/ops/cargo_fetch.rs @@ -0,0 +1,65 @@ +use crate::core::compiler::{BuildConfig, CompileMode, Kind, TargetInfo}; +use crate::core::{PackageSet, Resolve, Workspace}; +use crate::ops; +use crate::util::CargoResult; +use crate::util::Config; +use std::collections::HashSet; + +pub struct FetchOptions<'a> { + pub config: &'a Config, + /// The target arch triple to fetch dependencies for + pub target: Option, +} + +/// Executes `cargo fetch`. +pub fn fetch<'a>( + ws: &Workspace<'a>, + options: &FetchOptions<'a>, +) -> CargoResult<(Resolve, PackageSet<'a>)> { + let (packages, resolve) = ops::resolve_ws(ws)?; + + let jobs = Some(1); + let config = ws.config(); + let build_config = BuildConfig::new(config, jobs, &options.target, CompileMode::Build)?; + let rustc = config.rustc(Some(ws))?; + let target_info = + TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Target)?; + { + let mut fetched_packages = HashSet::new(); + let mut deps_to_fetch = ws.members().map(|p| p.package_id()).collect::>(); + let mut to_download = Vec::new(); + + while let Some(id) = deps_to_fetch.pop() { + if !fetched_packages.insert(id) { + continue; + } + + to_download.push(id); + let deps = resolve + .deps(id) + .filter(|&(_id, deps)| { + deps.iter().any(|d| { + // If no target was specified then all dependencies can + // be fetched. + let target = match options.target { + Some(ref t) => t, + None => return true, + }; + // If this dependency is only available for certain + // platforms, make sure we're only fetching it for that + // platform. 
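+                    // Illustrative example (hypothetical manifest): with
+                    // `--target` given, a dependency declared under
+                    // `[target.'cfg(windows)'.dependencies]` is skipped
+                    // unless the requested triple matches that cfg expression.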
+ let platform = match d.platform() { + Some(p) => p, + None => return true, + }; + platform.matches(target, target_info.cfg()) + }) + }) + .map(|(id, _deps)| id); + deps_to_fetch.extend(deps); + } + packages.get_many(to_download)?; + } + + Ok((resolve, packages)) +} diff --git a/src/cargo/ops/cargo_generate_lockfile.rs b/src/cargo/ops/cargo_generate_lockfile.rs new file mode 100644 index 000000000..c17aa4a10 --- /dev/null +++ b/src/cargo/ops/cargo_generate_lockfile.rs @@ -0,0 +1,220 @@ +use std::collections::{BTreeMap, HashSet}; + +use log::debug; +use termcolor::Color::{self, Cyan, Green, Red}; + +use crate::core::registry::PackageRegistry; +use crate::core::resolver::Method; +use crate::core::PackageId; +use crate::core::{Resolve, SourceId, Workspace}; +use crate::ops; +use crate::util::config::Config; +use crate::util::CargoResult; + +pub struct UpdateOptions<'a> { + pub config: &'a Config, + pub to_update: Vec, + pub precise: Option<&'a str>, + pub aggressive: bool, + pub dry_run: bool, +} + +pub fn generate_lockfile(ws: &Workspace<'_>) -> CargoResult<()> { + let mut registry = PackageRegistry::new(ws.config())?; + let resolve = ops::resolve_with_previous( + &mut registry, + ws, + Method::Everything, + None, + None, + &[], + true, + true, + )?; + ops::write_pkg_lockfile(ws, &resolve)?; + Ok(()) +} + +pub fn update_lockfile(ws: &Workspace<'_>, opts: &UpdateOptions<'_>) -> CargoResult<()> { + if opts.aggressive && opts.precise.is_some() { + failure::bail!("cannot specify both aggressive and precise simultaneously") + } + + if ws.members().count() == 0 { + failure::bail!("you can't generate a lockfile for an empty workspace.") + } + + if opts.config.cli_unstable().offline { + failure::bail!("you can't update in the offline mode"); + } + + let previous_resolve = match ops::load_pkg_lockfile(ws)? { + Some(resolve) => resolve, + None => return generate_lockfile(ws), + }; + let mut registry = PackageRegistry::new(opts.config)?; + let mut to_avoid = HashSet::new(); + + if opts.to_update.is_empty() { + to_avoid.extend(previous_resolve.iter()); + } else { + let mut sources = Vec::new(); + for name in opts.to_update.iter() { + let dep = previous_resolve.query(name)?; + if opts.aggressive { + fill_with_deps(&previous_resolve, dep, &mut to_avoid, &mut HashSet::new()); + } else { + to_avoid.insert(dep); + sources.push(match opts.precise { + Some(precise) => { + // TODO: see comment in `resolve.rs` as well, but this + // seems like a pretty hokey reason to single out + // the registry as well. + let precise = if dep.source_id().is_registry() { + format!("{}={}->{}", dep.name(), dep.version(), precise) + } else { + precise.to_string() + }; + dep.source_id().with_precise(Some(precise)) + } + None => dep.source_id().with_precise(None), + }); + } + } + registry.add_sources(sources)?; + } + + let resolve = ops::resolve_with_previous( + &mut registry, + ws, + Method::Everything, + Some(&previous_resolve), + Some(&to_avoid), + &[], + true, + true, + )?; + + // Summarize what is changing for the user. 
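+    //
+    // Illustrative output (hypothetical packages and versions):
+    //     Updating log v0.4.6 -> v0.4.7
+    //     Removing foo v0.1.0
+    //       Adding bar v0.2.0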
+ let print_change = |status: &str, msg: String, color: Color| { + opts.config.shell().status_with_color(status, msg, color) + }; + for (removed, added) in compare_dependency_graphs(&previous_resolve, &resolve) { + if removed.len() == 1 && added.len() == 1 { + let msg = if removed[0].source_id().is_git() { + format!( + "{} -> #{}", + removed[0], + &added[0].source_id().precise().unwrap()[..8] + ) + } else { + format!("{} -> v{}", removed[0], added[0].version()) + }; + print_change("Updating", msg, Green)?; + } else { + for package in removed.iter() { + print_change("Removing", format!("{}", package), Red)?; + } + for package in added.iter() { + print_change("Adding", format!("{}", package), Cyan)?; + } + } + } + if opts.dry_run { + opts.config + .shell() + .warn("not updating lockfile due to dry run")?; + } else { + ops::write_pkg_lockfile(ws, &resolve)?; + } + return Ok(()); + + fn fill_with_deps<'a>( + resolve: &'a Resolve, + dep: PackageId, + set: &mut HashSet, + visited: &mut HashSet, + ) { + if !visited.insert(dep) { + return; + } + set.insert(dep); + for dep in resolve.deps_not_replaced(dep) { + fill_with_deps(resolve, dep, set, visited); + } + } + + fn compare_dependency_graphs( + previous_resolve: &Resolve, + resolve: &Resolve, + ) -> Vec<(Vec, Vec)> { + fn key(dep: PackageId) -> (&'static str, SourceId) { + (dep.name().as_str(), dep.source_id()) + } + + // Removes all package IDs in `b` from `a`. Note that this is somewhat + // more complicated because the equality for source IDs does not take + // precise versions into account (e.g., git shas), but we want to take + // that into account here. + fn vec_subtract(a: &[PackageId], b: &[PackageId]) -> Vec { + a.iter() + .filter(|a| { + // If this package ID is not found in `b`, then it's definitely + // in the subtracted set. + let i = match b.binary_search(a) { + Ok(i) => i, + Err(..) => return true, + }; + + // If we've found `a` in `b`, then we iterate over all instances + // (we know `b` is sorted) and see if they all have different + // precise versions. If so, then `a` isn't actually in `b` so + // we'll let it through. + // + // Note that we only check this for non-registry sources, + // however, as registries contain enough version information in + // the package ID to disambiguate. + if a.source_id().is_registry() { + return false; + } + b[i..] + .iter() + .take_while(|b| a == b) + .all(|b| a.source_id().precise() != b.source_id().precise()) + }) + .cloned() + .collect() + } + + // Map `(package name, package source)` to `(removed versions, added versions)`. 
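+        // (Illustrative aside, not from the upstream source.) Sketch of the
+        // grouping built below, assuming two resolves that disagree on one
+        // crate only:
+        //
+        //     previous: [("log", registry), ("rand", registry) v0.5.6]
+        //     new:      [("log", registry), ("rand", registry) v0.6.5]
+        //
+        // After the sorts and `vec_subtract` calls, the entry keyed by
+        // ("rand", registry) holds (removed = [v0.5.6], added = [v0.6.5]),
+        // while unchanged packages collapse to two empty vectors.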
+ let mut changes = BTreeMap::new(); + let empty = (Vec::new(), Vec::new()); + for dep in previous_resolve.iter() { + changes + .entry(key(dep)) + .or_insert_with(|| empty.clone()) + .0 + .push(dep); + } + for dep in resolve.iter() { + changes + .entry(key(dep)) + .or_insert_with(|| empty.clone()) + .1 + .push(dep); + } + + for v in changes.values_mut() { + let (ref mut old, ref mut new) = *v; + old.sort(); + new.sort(); + let removed = vec_subtract(old, new); + let added = vec_subtract(new, old); + *old = removed; + *new = added; + } + debug!("{:#?}", changes); + + changes.into_iter().map(|(_, v)| v).collect() + } +} diff --git a/src/cargo/ops/cargo_install.rs b/src/cargo/ops/cargo_install.rs new file mode 100644 index 000000000..b3a3a8062 --- /dev/null +++ b/src/cargo/ops/cargo_install.rs @@ -0,0 +1,503 @@ +use std::collections::{BTreeMap, BTreeSet, HashSet}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::{env, fs}; + +use tempfile::Builder as TempFileBuilder; + +use crate::core::compiler::{DefaultExecutor, Executor}; +use crate::core::{Edition, Package, Source, SourceId}; +use crate::core::{PackageId, Workspace}; +use crate::ops::common_for_install_and_uninstall::*; +use crate::ops::{self, CompileFilter}; +use crate::sources::{GitSource, SourceConfigMap}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::paths; +use crate::util::Config; +use crate::util::Filesystem; + +struct Transaction { + bins: Vec, +} + +impl Transaction { + fn success(mut self) { + self.bins.clear(); + } +} + +impl Drop for Transaction { + fn drop(&mut self) { + for bin in self.bins.iter() { + let _ = paths::remove_file(bin); + } + } +} + +pub fn install( + root: Option<&str>, + krates: Vec<&str>, + source_id: SourceId, + from_cwd: bool, + vers: Option<&str>, + opts: &ops::CompileOptions<'_>, + force: bool, +) -> CargoResult<()> { + let root = resolve_root(root, opts.config)?; + let map = SourceConfigMap::new(opts.config)?; + + let (installed_anything, scheduled_error) = if krates.len() <= 1 { + install_one( + &root, + &map, + krates.into_iter().next(), + source_id, + from_cwd, + vers, + opts, + force, + true, + )?; + (true, false) + } else { + let mut succeeded = vec![]; + let mut failed = vec![]; + let mut first = true; + for krate in krates { + let root = root.clone(); + let map = map.clone(); + match install_one( + &root, + &map, + Some(krate), + source_id, + from_cwd, + vers, + opts, + force, + first, + ) { + Ok(()) => succeeded.push(krate), + Err(e) => { + crate::handle_error(&e, &mut opts.config.shell()); + failed.push(krate) + } + } + first = false; + } + + let mut summary = vec![]; + if !succeeded.is_empty() { + summary.push(format!("Successfully installed {}!", succeeded.join(", "))); + } + if !failed.is_empty() { + summary.push(format!( + "Failed to install {} (see error(s) above).", + failed.join(", ") + )); + } + if !succeeded.is_empty() || !failed.is_empty() { + opts.config.shell().status("Summary", summary.join(" "))?; + } + + (!succeeded.is_empty(), !failed.is_empty()) + }; + + if installed_anything { + // Print a warning that if this directory isn't in PATH that they won't be + // able to run these commands. 
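+        // (Illustrative aside, not from the upstream source.)
+        // `env::split_paths` handles the platform separator for us, e.g.:
+        //
+        //     let path = std::ffi::OsString::from("/usr/bin:/home/user/.cargo/bin");
+        //     let dirs: Vec<_> = std::env::split_paths(&path).collect();
+        //     assert_eq!(dirs[1], std::path::PathBuf::from("/home/user/.cargo/bin"));
+        //
+        // (`:` on Unix, `;` on Windows), so the containment check below is a
+        // component-wise path comparison rather than string matching.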
+ let dst = metadata(opts.config, &root)?.parent().join("bin"); + let path = env::var_os("PATH").unwrap_or_default(); + for path in env::split_paths(&path) { + if path == dst { + return Ok(()); + } + } + + opts.config.shell().warn(&format!( + "be sure to add `{}` to your PATH to be \ + able to run the installed binaries", + dst.display() + ))?; + } + + if scheduled_error { + failure::bail!("some crates failed to install"); + } + + Ok(()) +} + +fn install_one( + root: &Filesystem, + map: &SourceConfigMap<'_>, + krate: Option<&str>, + source_id: SourceId, + from_cwd: bool, + vers: Option<&str>, + opts: &ops::CompileOptions<'_>, + force: bool, + is_first_install: bool, +) -> CargoResult<()> { + let config = opts.config; + + let (pkg, source) = if source_id.is_git() { + select_pkg( + GitSource::new(source_id, config)?, + krate, + vers, + config, + true, + &mut |git| git.read_packages(), + )? + } else if source_id.is_path() { + let mut src = path_source(source_id, config)?; + if !src.path().is_dir() { + failure::bail!( + "`{}` is not a directory. \ + --path must point to a directory containing a Cargo.toml file.", + src.path().display() + ) + } + if !src.path().join("Cargo.toml").exists() { + if from_cwd { + failure::bail!( + "`{}` is not a crate root; specify a crate to \ + install from crates.io, or use --path or --git to \ + specify an alternate source", + src.path().display() + ); + } else { + failure::bail!( + "`{}` does not contain a Cargo.toml file. \ + --path must point to a directory containing a Cargo.toml file.", + src.path().display() + ) + } + } + src.update()?; + select_pkg(src, krate, vers, config, false, &mut |path| { + path.read_packages() + })? + } else { + select_pkg( + map.load(source_id, &HashSet::new())?, + krate, + vers, + config, + is_first_install, + &mut |_| { + failure::bail!( + "must specify a crate to install from \ + crates.io, or use --path or --git to \ + specify alternate source" + ) + }, + )? + }; + + let mut td_opt = None; + let mut needs_cleanup = false; + let overidden_target_dir = if source_id.is_path() { + None + } else if let Some(dir) = config.target_dir()? { + Some(dir) + } else if let Ok(td) = TempFileBuilder::new().prefix("cargo-install").tempdir() { + let p = td.path().to_owned(); + td_opt = Some(td); + Some(Filesystem::new(p)) + } else { + needs_cleanup = true; + Some(Filesystem::new(config.cwd().join("target-install"))) + }; + + let ws = match overidden_target_dir { + Some(dir) => Workspace::ephemeral(pkg, config, Some(dir), false)?, + None => { + let mut ws = Workspace::new(pkg.manifest_path(), config)?; + ws.set_require_optional_deps(false); + ws + } + }; + let pkg = ws.current()?; + + if from_cwd { + if pkg.manifest().edition() == Edition::Edition2015 { + config.shell().warn( + "Using `cargo install` to install the binaries for the \ + package in current working directory is deprecated, \ + use `cargo install --path .` instead. \ + Use `cargo build` if you want to simply build the package.", + )? + } else { + failure::bail!( + "Using `cargo install` to install the binaries for the \ + package in current working directory is no longer supported, \ + use `cargo install --path .` instead. \ + Use `cargo build` if you want to simply build the package." + ) + } + }; + + config.shell().status("Installing", pkg)?; + + // Preflight checks to check up front whether we'll overwrite something. + // We have to check this again afterwards, but may as well avoid building + // anything if we're gonna throw it away anyway. 
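+    // (Illustrative aside, not from the upstream source.) The overwrite check
+    // runs twice because the build can take minutes: another `cargo install`
+    // may finish in between, so this early check only exists to fail fast,
+    // while the post-build `check_overwrites` call remains the authoritative
+    // one. A sketch of the failure it guards against:
+    //
+    //     $ cargo install ripgrep
+    //     error: binary `rg` already exists in destination as part of `ripgrep v0.10.0`
+    //     Add --force to overwrite
+    //
+    // (Crate and version above are made up; the message shape matches
+    // `check_overwrites` below.)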
+ { + let metadata = metadata(config, root)?; + let list = read_crate_list(&metadata)?; + let dst = metadata.parent().join("bin"); + check_overwrites(&dst, pkg, &opts.filter, &list, force)?; + } + + let exec: Arc = Arc::new(DefaultExecutor); + let compile = ops::compile_ws(&ws, Some(source), opts, &exec).chain_err(|| { + if let Some(td) = td_opt.take() { + // preserve the temporary directory, so the user can inspect it + td.into_path(); + } + + failure::format_err!( + "failed to compile `{}`, intermediate artifacts can be \ + found at `{}`", + pkg, + ws.target_dir().display() + ) + })?; + let binaries: Vec<(&str, &Path)> = compile + .binaries + .iter() + .map(|bin| { + let name = bin.file_name().unwrap(); + if let Some(s) = name.to_str() { + Ok((s, bin.as_ref())) + } else { + failure::bail!("Binary `{:?}` name can't be serialized into string", name) + } + }) + .collect::>()?; + if binaries.is_empty() { + failure::bail!( + "no binaries are available for install using the selected \ + features" + ); + } + + let metadata = metadata(config, root)?; + let mut list = read_crate_list(&metadata)?; + let dst = metadata.parent().join("bin"); + let duplicates = check_overwrites(&dst, pkg, &opts.filter, &list, force)?; + + fs::create_dir_all(&dst)?; + + // Copy all binaries to a temporary directory under `dst` first, catching + // some failure modes (e.g., out of space) before touching the existing + // binaries. This directory will get cleaned up via RAII. + let staging_dir = TempFileBuilder::new() + .prefix("cargo-install") + .tempdir_in(&dst)?; + for &(bin, src) in binaries.iter() { + let dst = staging_dir.path().join(bin); + // Try to move if `target_dir` is transient. + if !source_id.is_path() && fs::rename(src, &dst).is_ok() { + continue; + } + fs::copy(src, &dst).chain_err(|| { + failure::format_err!("failed to copy `{}` to `{}`", src.display(), dst.display()) + })?; + } + + let (to_replace, to_install): (Vec<&str>, Vec<&str>) = binaries + .iter() + .map(|&(bin, _)| bin) + .partition(|&bin| duplicates.contains_key(bin)); + + let mut installed = Transaction { bins: Vec::new() }; + + // Move the temporary copies into `dst` starting with new binaries. + for bin in to_install.iter() { + let src = staging_dir.path().join(bin); + let dst = dst.join(bin); + config.shell().status("Installing", dst.display())?; + fs::rename(&src, &dst).chain_err(|| { + failure::format_err!("failed to move `{}` to `{}`", src.display(), dst.display()) + })?; + installed.bins.push(dst); + } + + // Repeat for binaries which replace existing ones but don't pop the error + // up until after updating metadata. + let mut replaced_names = Vec::new(); + let result = { + let mut try_install = || -> CargoResult<()> { + for &bin in to_replace.iter() { + let src = staging_dir.path().join(bin); + let dst = dst.join(bin); + config.shell().status("Replacing", dst.display())?; + fs::rename(&src, &dst).chain_err(|| { + failure::format_err!( + "failed to move `{}` to `{}`", + src.display(), + dst.display() + ) + })?; + replaced_names.push(bin); + } + Ok(()) + }; + try_install() + }; + + // Update records of replaced binaries. 
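+    // (Illustrative aside, not from the upstream source.) The v1 install
+    // metadata is conceptually a map from package ID to the set of binaries
+    // it owns, roughly:
+    //
+    //     ripgrep 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)
+    //         => {"rg"}
+    //
+    // so replacing `rg` means removing it from the old owner's set here and
+    // re-inserting it under the freshly installed package below.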
+ for &bin in replaced_names.iter() { + if let Some(&Some(ref p)) = duplicates.get(bin) { + if let Some(set) = list.v1_mut().get_mut(p) { + set.remove(bin); + } + } + // Failsafe to force replacing metadata for git packages + // https://github.com/rust-lang/cargo/issues/4582 + if let Some(set) = list.v1_mut().remove(&pkg.package_id()) { + list.v1_mut().insert(pkg.package_id(), set); + } + list.v1_mut() + .entry(pkg.package_id()) + .or_insert_with(BTreeSet::new) + .insert(bin.to_string()); + } + + // Remove empty metadata lines. + let pkgs = list + .v1() + .iter() + .filter_map(|(&p, set)| if set.is_empty() { Some(p) } else { None }) + .collect::>(); + for p in pkgs.iter() { + list.v1_mut().remove(p); + } + + // If installation was successful record newly installed binaries. + if result.is_ok() { + list.v1_mut() + .entry(pkg.package_id()) + .or_insert_with(BTreeSet::new) + .extend(to_install.iter().map(|s| s.to_string())); + } + + let write_result = write_crate_list(&metadata, list); + match write_result { + // Replacement error (if any) isn't actually caused by write error + // but this seems to be the only way to show both. + Err(err) => result.chain_err(|| err)?, + Ok(_) => result?, + } + + // Reaching here means all actions have succeeded. Clean up. + installed.success(); + if needs_cleanup { + // Don't bother grabbing a lock as we're going to blow it all away + // anyway. + let target_dir = ws.target_dir().into_path_unlocked(); + paths::remove_dir_all(&target_dir)?; + } + + Ok(()) +} + +fn check_overwrites( + dst: &Path, + pkg: &Package, + filter: &ops::CompileFilter, + prev: &CrateListingV1, + force: bool, +) -> CargoResult>> { + // If explicit --bin or --example flags were passed then those'll + // get checked during cargo_compile, we only care about the "build + // everything" case here + if !filter.is_specific() && !pkg.targets().iter().any(|t| t.is_bin()) { + failure::bail!("specified package has no binaries") + } + let duplicates = find_duplicates(dst, pkg, filter, prev); + if force || duplicates.is_empty() { + return Ok(duplicates); + } + // Format the error message. + let mut msg = String::new(); + for (bin, p) in duplicates.iter() { + msg.push_str(&format!("binary `{}` already exists in destination", bin)); + if let Some(p) = p.as_ref() { + msg.push_str(&format!(" as part of `{}`\n", p)); + } else { + msg.push_str("\n"); + } + } + msg.push_str("Add --force to overwrite"); + Err(failure::format_err!("{}", msg)) +} + +fn find_duplicates( + dst: &Path, + pkg: &Package, + filter: &ops::CompileFilter, + prev: &CrateListingV1, +) -> BTreeMap> { + let check = |name: String| { + // Need to provide type, works around Rust Issue #93349 + let name = format!("{}{}", name, env::consts::EXE_SUFFIX); + if fs::metadata(dst.join(&name)).is_err() { + None + } else if let Some((&p, _)) = prev.v1().iter().find(|&(_, v)| v.contains(&name)) { + Some((name, Some(p))) + } else { + Some((name, None)) + } + }; + match *filter { + CompileFilter::Default { .. } => pkg + .targets() + .iter() + .filter(|t| t.is_bin()) + .filter_map(|t| check(t.name().to_string())) + .collect(), + CompileFilter::Only { + ref bins, + ref examples, + .. 
+ } => { + let all_bins: Vec = bins.try_collect().unwrap_or_else(|| { + pkg.targets() + .iter() + .filter(|t| t.is_bin()) + .map(|t| t.name().to_string()) + .collect() + }); + let all_examples: Vec = examples.try_collect().unwrap_or_else(|| { + pkg.targets() + .iter() + .filter(|t| t.is_bin_example()) + .map(|t| t.name().to_string()) + .collect() + }); + + all_bins + .iter() + .chain(all_examples.iter()) + .filter_map(|t| check(t.clone())) + .collect::>>() + } + } +} + +pub fn install_list(dst: Option<&str>, config: &Config) -> CargoResult<()> { + let dst = resolve_root(dst, config)?; + let dst = metadata(config, &dst)?; + let list = read_crate_list(&dst)?; + for (k, v) in list.v1().iter() { + println!("{}:", k); + for bin in v { + println!(" {}", bin); + } + } + Ok(()) +} diff --git a/src/cargo/ops/cargo_new.rs b/src/cargo/ops/cargo_new.rs new file mode 100644 index 000000000..bc27ddf0b --- /dev/null +++ b/src/cargo/ops/cargo_new.rs @@ -0,0 +1,767 @@ +use std::collections::BTreeMap; +use std::env; +use std::fmt; +use std::fs; +use std::io::{BufRead, BufReader, ErrorKind}; +use std::path::{Path, PathBuf}; + +use git2::Config as GitConfig; +use git2::Repository as GitRepository; + +use crate::core::{compiler, Workspace}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{existing_vcs_repo, internal, FossilRepo, GitRepo, HgRepo, PijulRepo}; +use crate::util::{paths, validate_package_name, Config}; + +use toml; + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum VersionControl { + Git, + Hg, + Pijul, + Fossil, + NoVcs, +} + +#[derive(Debug)] +pub struct NewOptions { + pub version_control: Option, + pub kind: NewProjectKind, + /// Absolute path to the directory for the new package + pub path: PathBuf, + pub name: Option, + pub edition: Option, + pub registry: Option, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum NewProjectKind { + Bin, + Lib, +} + +impl NewProjectKind { + fn is_bin(self) -> bool { + self == NewProjectKind::Bin + } +} + +impl fmt::Display for NewProjectKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + NewProjectKind::Bin => "binary (application)", + NewProjectKind::Lib => "library", + } + .fmt(f) + } +} + +struct SourceFileInformation { + relative_path: String, + target_name: String, + bin: bool, +} + +struct MkOptions<'a> { + version_control: Option, + path: &'a Path, + name: &'a str, + source_files: Vec, + bin: bool, + edition: Option<&'a str>, + registry: Option<&'a str>, +} + +impl NewOptions { + pub fn new( + version_control: Option, + bin: bool, + lib: bool, + path: PathBuf, + name: Option, + edition: Option, + registry: Option, + ) -> CargoResult { + let kind = match (bin, lib) { + (true, true) => failure::bail!("can't specify both lib and binary outputs"), + (false, true) => NewProjectKind::Lib, + // default to bin + (_, false) => NewProjectKind::Bin, + }; + + let opts = NewOptions { + version_control, + kind, + path, + name, + edition, + registry, + }; + Ok(opts) + } +} + +struct CargoNewConfig { + name: Option, + email: Option, + version_control: Option, +} + +fn get_name<'a>(path: &'a Path, opts: &'a NewOptions) -> CargoResult<&'a str> { + if let Some(ref name) = opts.name { + return Ok(name); + } + + let file_name = path.file_name().ok_or_else(|| { + failure::format_err!( + "cannot auto-detect package name from path {:?} ; use --name to override", + path.as_os_str() + ) + })?; + + file_name.to_str().ok_or_else(|| { + failure::format_err!( + "cannot create package with a non-unicode name: 
{:?}", + file_name + ) + }) +} + +fn check_name(name: &str, opts: &NewOptions) -> CargoResult<()> { + // If --name is already used to override, no point in suggesting it + // again as a fix. + let name_help = match opts.name { + Some(_) => "", + None => "\nuse --name to override crate name", + }; + + // Ban keywords + test list found at + // https://doc.rust-lang.org/grammar.html#keywords + let blacklist = [ + "abstract", "alignof", "as", "become", "box", "break", "const", "continue", "crate", "do", + "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in", "let", "loop", + "macro", "match", "mod", "move", "mut", "offsetof", "override", "priv", "proc", "pub", + "pure", "ref", "return", "self", "sizeof", "static", "struct", "super", "test", "trait", + "true", "type", "typeof", "unsafe", "unsized", "use", "virtual", "where", "while", "yield", + ]; + if blacklist.contains(&name) || (opts.kind.is_bin() && compiler::is_bad_artifact_name(name)) { + failure::bail!( + "The name `{}` cannot be used as a crate name{}", + name, + name_help + ) + } + + if let Some(ref c) = name.chars().nth(0) { + if c.is_digit(10) { + failure::bail!( + "Package names starting with a digit cannot be used as a crate name{}", + name_help + ) + } + } + + validate_package_name(name, "crate name", name_help)?; + Ok(()) +} + +fn detect_source_paths_and_types( + package_path: &Path, + package_name: &str, + detected_files: &mut Vec, +) -> CargoResult<()> { + let path = package_path; + let name = package_name; + + enum H { + Bin, + Lib, + Detect, + } + + struct Test { + proposed_path: String, + handling: H, + } + + let tests = vec![ + Test { + proposed_path: "src/main.rs".to_string(), + handling: H::Bin, + }, + Test { + proposed_path: "main.rs".to_string(), + handling: H::Bin, + }, + Test { + proposed_path: format!("src/{}.rs", name), + handling: H::Detect, + }, + Test { + proposed_path: format!("{}.rs", name), + handling: H::Detect, + }, + Test { + proposed_path: "src/lib.rs".to_string(), + handling: H::Lib, + }, + Test { + proposed_path: "lib.rs".to_string(), + handling: H::Lib, + }, + ]; + + for i in tests { + let pp = i.proposed_path; + + // path/pp does not exist or is not a file + if !fs::metadata(&path.join(&pp)) + .map(|x| x.is_file()) + .unwrap_or(false) + { + continue; + } + + let sfi = match i.handling { + H::Bin => SourceFileInformation { + relative_path: pp, + target_name: package_name.to_string(), + bin: true, + }, + H::Lib => SourceFileInformation { + relative_path: pp, + target_name: package_name.to_string(), + bin: false, + }, + H::Detect => { + let content = paths::read(&path.join(pp.clone()))?; + let isbin = content.contains("fn main"); + SourceFileInformation { + relative_path: pp, + target_name: package_name.to_string(), + bin: isbin, + } + } + }; + detected_files.push(sfi); + } + + // Check for duplicate lib attempt + + let mut previous_lib_relpath: Option<&str> = None; + let mut duplicates_checker: BTreeMap<&str, &SourceFileInformation> = BTreeMap::new(); + + for i in detected_files { + if i.bin { + if let Some(x) = BTreeMap::get::(&duplicates_checker, i.target_name.as_ref()) { + failure::bail!( + "\ +multiple possible binary sources found: + {} + {} +cannot automatically generate Cargo.toml as the main target would be ambiguous", + &x.relative_path, + &i.relative_path + ); + } + duplicates_checker.insert(i.target_name.as_ref(), i); + } else { + if let Some(plp) = previous_lib_relpath { + failure::bail!( + "cannot have a package with \ + multiple libraries, \ + found both `{}` and 
`{}`", + plp, + i.relative_path + ) + } + previous_lib_relpath = Some(&i.relative_path); + } + } + + Ok(()) +} + +fn plan_new_source_file(bin: bool, package_name: String) -> SourceFileInformation { + if bin { + SourceFileInformation { + relative_path: "src/main.rs".to_string(), + target_name: package_name, + bin: true, + } + } else { + SourceFileInformation { + relative_path: "src/lib.rs".to_string(), + target_name: package_name, + bin: false, + } + } +} + +pub fn new(opts: &NewOptions, config: &Config) -> CargoResult<()> { + let path = &opts.path; + if fs::metadata(path).is_ok() { + failure::bail!( + "destination `{}` already exists\n\n\ + Use `cargo init` to initialize the directory", + path.display() + ) + } + + let name = get_name(path, opts)?; + check_name(name, opts)?; + + let mkopts = MkOptions { + version_control: opts.version_control, + path, + name, + source_files: vec![plan_new_source_file(opts.kind.is_bin(), name.to_string())], + bin: opts.kind.is_bin(), + edition: opts.edition.as_ref().map(|s| &**s), + registry: opts.registry.as_ref().map(|s| &**s), + }; + + mk(config, &mkopts).chain_err(|| { + failure::format_err!( + "Failed to create package `{}` at `{}`", + name, + path.display() + ) + })?; + Ok(()) +} + +pub fn init(opts: &NewOptions, config: &Config) -> CargoResult<()> { + let path = &opts.path; + + if fs::metadata(&path.join("Cargo.toml")).is_ok() { + failure::bail!("`cargo init` cannot be run on existing Cargo packages") + } + + let name = get_name(path, opts)?; + check_name(name, opts)?; + + let mut src_paths_types = vec![]; + + detect_source_paths_and_types(path, name, &mut src_paths_types)?; + + if src_paths_types.is_empty() { + src_paths_types.push(plan_new_source_file(opts.kind.is_bin(), name.to_string())); + } else { + // --bin option may be ignored if lib.rs or src/lib.rs present + // Maybe when doing `cargo init --bin` inside a library package stub, + // user may mean "initialize for library, but also add binary target" + } + + let mut version_control = opts.version_control; + + if version_control == None { + let mut num_detected_vsces = 0; + + if fs::metadata(&path.join(".git")).is_ok() { + version_control = Some(VersionControl::Git); + num_detected_vsces += 1; + } + + if fs::metadata(&path.join(".hg")).is_ok() { + version_control = Some(VersionControl::Hg); + num_detected_vsces += 1; + } + + if fs::metadata(&path.join(".pijul")).is_ok() { + version_control = Some(VersionControl::Pijul); + num_detected_vsces += 1; + } + + if fs::metadata(&path.join(".fossil")).is_ok() { + version_control = Some(VersionControl::Fossil); + num_detected_vsces += 1; + } + + // if none exists, maybe create git, like in `cargo new` + + if num_detected_vsces > 1 { + failure::bail!( + "more than one of .hg, .git, .pijul, .fossil configurations \ + found and the ignore file can't be filled in as \ + a result. 
specify --vcs to override detection" + ); + } + } + + let mkopts = MkOptions { + version_control, + path, + name, + bin: src_paths_types.iter().any(|x| x.bin), + source_files: src_paths_types, + edition: opts.edition.as_ref().map(|s| &**s), + registry: opts.registry.as_ref().map(|s| &**s), + }; + + mk(config, &mkopts).chain_err(|| { + failure::format_err!( + "Failed to create package `{}` at `{}`", + name, + path.display() + ) + })?; + Ok(()) +} + +/// IgnoreList +struct IgnoreList { + /// git like formatted entries + ignore: Vec, + /// mercurial formatted entries + hg_ignore: Vec, +} + +impl IgnoreList { + /// constructor to build a new ignore file + fn new() -> IgnoreList { + IgnoreList { + ignore: Vec::new(), + hg_ignore: Vec::new(), + } + } + + /// add a new entry to the ignore list. Requires two arguments with the + /// entry in two different formats. One for "git style" entries and one for + /// "mercurial like" entries. + fn push(&mut self, ignore: &str, hg_ignore: &str) { + self.ignore.push(ignore.to_string()); + self.hg_ignore.push(hg_ignore.to_string()); + } + + /// Return the correctly formatted content of the ignore file for the given + /// version control system as `String`. + fn format_new(&self, vcs: VersionControl) -> String { + match vcs { + VersionControl::Hg => self.hg_ignore.join("\n"), + _ => self.ignore.join("\n"), + } + } + + /// format_existing is used to format the IgnoreList when the ignore file + /// already exists. It reads the contents of the given `BufRead` and + /// checks if the contents of the ignore list are already existing in the + /// file. + fn format_existing(&self, existing: T, vcs: VersionControl) -> String { + // TODO: is unwrap safe? + let existing_items = existing.lines().collect::, _>>().unwrap(); + + let ignore_items = match vcs { + VersionControl::Hg => &self.hg_ignore, + _ => &self.ignore, + }; + + let mut out = "\n\n#Added by cargo\n\ + #\n\ + #already existing elements are commented out\n" + .to_string(); + + for item in ignore_items { + out.push('\n'); + if existing_items.contains(item) { + out.push('#'); + } + out.push_str(item) + } + + out + } +} + +/// Writes the ignore file to the given directory. If the ignore file for the +/// given vcs system already exists, its content is read and duplicate ignore +/// file entries are filtered out. +fn write_ignore_file( + base_path: &Path, + list: &IgnoreList, + vcs: VersionControl, +) -> CargoResult { + let fp_ignore = match vcs { + VersionControl::Git => base_path.join(".gitignore"), + VersionControl::Hg => base_path.join(".hgignore"), + VersionControl::Pijul => base_path.join(".ignore"), + VersionControl::Fossil => return Ok("".to_string()), + VersionControl::NoVcs => return Ok("".to_string()), + }; + + let ignore: String = match fs::File::open(&fp_ignore) { + Err(why) => match why.kind() { + ErrorKind::NotFound => list.format_new(vcs), + _ => return Err(failure::format_err!("{}", why)), + }, + Ok(file) => list.format_existing(BufReader::new(file), vcs), + }; + + paths::append(&fp_ignore, ignore.as_bytes())?; + + Ok(ignore) +} + +/// Initializes the correct VCS system based on the provided config. 
+fn init_vcs(path: &Path, vcs: VersionControl, config: &Config) -> CargoResult<()> { + match vcs { + VersionControl::Git => { + if !path.join(".git").exists() { + GitRepo::init(path, config.cwd())?; + } + } + VersionControl::Hg => { + if !path.join(".hg").exists() { + HgRepo::init(path, config.cwd())?; + } + } + VersionControl::Pijul => { + if !path.join(".pijul").exists() { + PijulRepo::init(path, config.cwd())?; + } + } + VersionControl::Fossil => { + if path.join(".fossil").exists() { + FossilRepo::init(path, config.cwd())?; + } + } + VersionControl::NoVcs => { + fs::create_dir_all(path)?; + } + }; + + Ok(()) +} + +fn mk(config: &Config, opts: &MkOptions<'_>) -> CargoResult<()> { + let path = opts.path; + let name = opts.name; + let cfg = global_config(config)?; + + // Using the push method with two arguments ensures that the entries for + // both `ignore` and `hgignore` are in sync. + let mut ignore = IgnoreList::new(); + ignore.push("/target", "^target/"); + ignore.push("**/*.rs.bk", "glob:*.rs.bk\n"); + if !opts.bin { + ignore.push("Cargo.lock", "glob:Cargo.lock"); + } + + let vcs = opts.version_control.unwrap_or_else(|| { + let in_existing_vcs = existing_vcs_repo(path.parent().unwrap_or(path), config.cwd()); + match (cfg.version_control, in_existing_vcs) { + (None, false) => VersionControl::Git, + (Some(opt), false) => opt, + (_, true) => VersionControl::NoVcs, + } + }); + + init_vcs(path, vcs, config)?; + write_ignore_file(path, &ignore, vcs)?; + + let (author_name, email) = discover_author()?; + let author = match (cfg.name, cfg.email, author_name, email) { + (Some(name), Some(email), _, _) + | (Some(name), None, _, Some(email)) + | (None, Some(email), name, _) + | (None, None, name, Some(email)) => format!("{} <{}>", name, email), + (Some(name), None, _, None) | (None, None, name, None) => name, + }; + + let mut cargotoml_path_specifier = String::new(); + + // Calculate what `[lib]` and `[[bin]]`s we need to append to `Cargo.toml`. + + for i in &opts.source_files { + if i.bin { + if i.relative_path != "src/main.rs" { + cargotoml_path_specifier.push_str(&format!( + r#" +[[bin]] +name = "{}" +path = {} +"#, + i.target_name, + toml::Value::String(i.relative_path.clone()) + )); + } + } else if i.relative_path != "src/lib.rs" { + cargotoml_path_specifier.push_str(&format!( + r#" +[lib] +name = "{}" +path = {} +"#, + i.target_name, + toml::Value::String(i.relative_path.clone()) + )); + } + } + + // Create `Cargo.toml` file with necessary `[lib]` and `[[bin]]` sections, if needed. + + paths::write( + &path.join("Cargo.toml"), + format!( + r#"[package] +name = "{}" +version = "0.1.0" +authors = [{}] +edition = {} +{} +[dependencies] +{}"#, + name, + toml::Value::String(author), + match opts.edition { + Some(edition) => toml::Value::String(edition.to_string()), + None => toml::Value::String("2018".to_string()), + }, + match opts.registry { + Some(registry) => format!( + "publish = {}\n", + toml::Value::Array(vec!(toml::Value::String(registry.to_string()))) + ), + None => "".to_string(), + }, + cargotoml_path_specifier + ) + .as_bytes(), + )?; + + // Create all specified source files (with respective parent directories) if they don't exist. 
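+    // (Illustrative aside, not from the upstream source.) For a plain
+    // `cargo new hello`, the `format!` above is expected to render roughly:
+    //
+    //     [package]
+    //     name = "hello"
+    //     version = "0.1.0"
+    //     authors = ["Jane Doe <jane@example.com>"]
+    //     edition = "2018"
+    //
+    //     [dependencies]
+    //
+    // with `[lib]`/`[[bin]]` path sections appended only when the detected
+    // source layout deviates from `src/lib.rs`/`src/main.rs`. (Author name
+    // above is hypothetical.)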
+ + for i in &opts.source_files { + let path_of_source_file = path.join(i.relative_path.clone()); + + if let Some(src_dir) = path_of_source_file.parent() { + fs::create_dir_all(src_dir)?; + } + + let default_file_content: &[u8] = if i.bin { + b"\ +fn main() { + println!(\"Hello, world!\"); +} +" + } else { + b"\ +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + assert_eq!(2 + 2, 4); + } +} +" + }; + + if !fs::metadata(&path_of_source_file) + .map(|x| x.is_file()) + .unwrap_or(false) + { + paths::write(&path_of_source_file, default_file_content)?; + } + } + + if let Err(e) = Workspace::new(&path.join("Cargo.toml"), config) { + let msg = format!( + "compiling this new crate may not work due to invalid \ + workspace configuration\n\n{}", + e + ); + config.shell().warn(msg)?; + } + + Ok(()) +} + +fn get_environment_variable(variables: &[&str]) -> Option { + variables.iter().filter_map(|var| env::var(var).ok()).next() +} + +fn discover_author() -> CargoResult<(String, Option)> { + let cwd = env::current_dir()?; + let git_config = if let Ok(repo) = GitRepository::discover(&cwd) { + repo.config() + .ok() + .or_else(|| GitConfig::open_default().ok()) + } else { + GitConfig::open_default().ok() + }; + let git_config = git_config.as_ref(); + let name_variables = [ + "CARGO_NAME", + "GIT_AUTHOR_NAME", + "GIT_COMMITTER_NAME", + "USER", + "USERNAME", + "NAME", + ]; + let name = get_environment_variable(&name_variables[0..3]) + .or_else(|| git_config.and_then(|g| g.get_string("user.name").ok())) + .or_else(|| get_environment_variable(&name_variables[3..])); + + let name = match name { + Some(name) => name, + None => { + let username_var = if cfg!(windows) { "USERNAME" } else { "USER" }; + failure::bail!( + "could not determine the current user, please set ${}", + username_var + ) + } + }; + let email_variables = [ + "CARGO_EMAIL", + "GIT_AUTHOR_EMAIL", + "GIT_COMMITTER_EMAIL", + "EMAIL", + ]; + let email = get_environment_variable(&email_variables[0..3]) + .or_else(|| git_config.and_then(|g| g.get_string("user.email").ok())) + .or_else(|| get_environment_variable(&email_variables[3..])); + + let name = name.trim().to_string(); + let email = email.map(|s| { + let mut s = s.trim(); + + // In some cases emails will already have <> remove them since they + // are already added when needed. 
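+        // (Illustrative aside, not from the upstream source.) E.g. a git
+        // config of `email = <jane@example.com>` would otherwise render as
+        // `Jane Doe <<jane@example.com>>` in the generated `authors` field,
+        // so the wrapping pair is stripped:
+        //
+        //     assert_eq!(trim_angle("<jane@example.com>"), "jane@example.com");
+        //
+        // (`trim_angle` is a hypothetical name for the slicing done below.)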
+ if s.starts_with('<') && s.ends_with('>') { + s = &s[1..s.len() - 1]; + } + + s.to_string() + }); + + Ok((name, email)) +} + +fn global_config(config: &Config) -> CargoResult { + let name = config.get_string("cargo-new.name")?.map(|s| s.val); + let email = config.get_string("cargo-new.email")?.map(|s| s.val); + let vcs = config.get_string("cargo-new.vcs")?; + + let vcs = match vcs.as_ref().map(|p| (&p.val[..], &p.definition)) { + Some(("git", _)) => Some(VersionControl::Git), + Some(("hg", _)) => Some(VersionControl::Hg), + Some(("pijul", _)) => Some(VersionControl::Pijul), + Some(("none", _)) => Some(VersionControl::NoVcs), + Some((s, p)) => { + return Err(internal(format!( + "invalid configuration for key \ + `cargo-new.vcs`, unknown vcs `{}` \ + (found in {})", + s, p + ))); + } + None => None, + }; + Ok(CargoNewConfig { + name, + email, + version_control: vcs, + }) +} diff --git a/src/cargo/ops/cargo_output_metadata.rs b/src/cargo/ops/cargo_output_metadata.rs new file mode 100644 index 000000000..1f08dd2cb --- /dev/null +++ b/src/cargo/ops/cargo_output_metadata.rs @@ -0,0 +1,138 @@ +use std::collections::HashMap; +use std::path::PathBuf; + +use serde::ser; +use serde::Serialize; + +use crate::core::resolver::Resolve; +use crate::core::{Package, PackageId, Workspace}; +use crate::ops::{self, Packages}; +use crate::util::CargoResult; + +const VERSION: u32 = 1; + +pub struct OutputMetadataOptions { + pub features: Vec, + pub no_default_features: bool, + pub all_features: bool, + pub no_deps: bool, + pub version: u32, +} + +/// Loads the manifest, resolves the dependencies of the package to the concrete +/// used versions - considering overrides - and writes all dependencies in a JSON +/// format to stdout. +pub fn output_metadata(ws: &Workspace<'_>, opt: &OutputMetadataOptions) -> CargoResult { + if opt.version != VERSION { + failure::bail!( + "metadata version {} not supported, only {} is currently supported", + opt.version, + VERSION + ); + } + if opt.no_deps { + metadata_no_deps(ws, opt) + } else { + metadata_full(ws, opt) + } +} + +fn metadata_no_deps(ws: &Workspace<'_>, _opt: &OutputMetadataOptions) -> CargoResult { + Ok(ExportInfo { + packages: ws.members().cloned().collect(), + workspace_members: ws.members().map(|pkg| pkg.package_id()).collect(), + resolve: None, + target_directory: ws.target_dir().clone().into_path_unlocked(), + version: VERSION, + workspace_root: ws.root().to_path_buf(), + }) +} + +fn metadata_full(ws: &Workspace<'_>, opt: &OutputMetadataOptions) -> CargoResult { + let specs = Packages::All.to_package_id_specs(ws)?; + let (package_set, resolve) = ops::resolve_ws_precisely( + ws, + None, + &opt.features, + opt.all_features, + opt.no_default_features, + &specs, + )?; + let mut packages = HashMap::new(); + for pkg in package_set.get_many(package_set.package_ids())? 
{ + packages.insert(pkg.package_id(), pkg.clone()); + } + + Ok(ExportInfo { + packages: packages.values().map(|p| (*p).clone()).collect(), + workspace_members: ws.members().map(|pkg| pkg.package_id()).collect(), + resolve: Some(MetadataResolve { + resolve: (packages, resolve), + root: ws.current_opt().map(|pkg| pkg.package_id()), + }), + target_directory: ws.target_dir().clone().into_path_unlocked(), + version: VERSION, + workspace_root: ws.root().to_path_buf(), + }) +} + +#[derive(Serialize)] +pub struct ExportInfo { + packages: Vec, + workspace_members: Vec, + resolve: Option, + target_directory: PathBuf, + version: u32, + workspace_root: PathBuf, +} + +/// Newtype wrapper to provide a custom `Serialize` implementation. +/// The one from lock file does not fit because it uses a non-standard +/// format for `PackageId`s +#[derive(Serialize)] +struct MetadataResolve { + #[serde(rename = "nodes", serialize_with = "serialize_resolve")] + resolve: (HashMap, Resolve), + root: Option, +} + +fn serialize_resolve( + (packages, resolve): &(HashMap, Resolve), + s: S, +) -> Result +where + S: ser::Serializer, +{ + #[derive(Serialize)] + struct Dep { + name: Option, + pkg: PackageId, + } + + #[derive(Serialize)] + struct Node<'a> { + id: PackageId, + dependencies: Vec, + deps: Vec, + features: Vec<&'a str>, + } + + s.collect_seq(resolve.iter().map(|id| { + Node { + id, + dependencies: resolve.deps(id).map(|(pkg, _deps)| pkg).collect(), + deps: resolve + .deps(id) + .map(|(pkg, _deps)| { + let name = packages + .get(&pkg) + .and_then(|pkg| pkg.targets().iter().find(|t| t.is_lib())) + .and_then(|lib_target| resolve.extern_crate_name(id, pkg, lib_target).ok()); + + Dep { name, pkg } + }) + .collect(), + features: resolve.features_sorted(id), + } + })) +} diff --git a/src/cargo/ops/cargo_package.rs b/src/cargo/ops/cargo_package.rs new file mode 100644 index 000000000..93a679f69 --- /dev/null +++ b/src/cargo/ops/cargo_package.rs @@ -0,0 +1,511 @@ +use std::fs::{self, File}; +use std::io::prelude::*; +use std::io::SeekFrom; +use std::path::{self, Path, PathBuf}; +use std::sync::Arc; + +use flate2::read::GzDecoder; +use flate2::{Compression, GzBuilder}; +use log::debug; +use serde_json::{self, json}; +use tar::{Archive, Builder, EntryType, Header}; + +use crate::core::compiler::{BuildConfig, CompileMode, DefaultExecutor, Executor}; +use crate::core::{Package, Source, SourceId, Workspace}; +use crate::ops; +use crate::sources::PathSource; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::paths; +use crate::util::{self, internal, Config, FileLock}; + +pub struct PackageOpts<'cfg> { + pub config: &'cfg Config, + pub list: bool, + pub check_metadata: bool, + pub allow_dirty: bool, + pub verify: bool, + pub jobs: Option, + pub target: Option, + pub features: Vec, + pub all_features: bool, + pub no_default_features: bool, +} + +static VCS_INFO_FILE: &'static str = ".cargo_vcs_info.json"; + +pub fn package(ws: &Workspace<'_>, opts: &PackageOpts<'_>) -> CargoResult> { + ops::resolve_ws(ws)?; + let pkg = ws.current()?; + let config = ws.config(); + + let mut src = PathSource::new(pkg.root(), pkg.package_id().source_id(), config); + src.update()?; + + if opts.check_metadata { + check_metadata(pkg, config)?; + } + + verify_dependencies(pkg)?; + + // `list_files` outputs warnings as a side effect, so only do it once. 
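+    // (Illustrative aside, not from the upstream source.) These are the
+    // files that `cargo package --list` prints further below; for a minimal
+    // crate, something like:
+    //
+    //     Cargo.toml
+    //     src/lib.rs
+    //
+    // plus `Cargo.lock` and `.cargo_vcs_info.json` when those are synthesized
+    // into the tarball. (Assumed output, for illustration only.)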
+ let src_files = src.list_files(pkg)?; + + // Make sure a VCS info file is not included in source, regardless of if + // we produced the file above, and in particular if we did not. + check_vcs_file_collision(pkg, &src_files)?; + + // Check (git) repository state, getting the current commit hash if not + // dirty. This will `bail!` if dirty, unless allow_dirty. Produce json + // info for any sha1 (HEAD revision) returned. + let vcs_info = if !opts.allow_dirty { + check_repo_state(pkg, &src_files, &config, opts.allow_dirty)? + .map(|h| json!({"git":{"sha1": h}})) + } else { + None + }; + + if opts.list { + let root = pkg.root(); + let mut list: Vec<_> = src + .list_files(pkg)? + .iter() + .map(|file| file.strip_prefix(root).unwrap().to_path_buf()) + .collect(); + if include_lockfile(pkg) { + list.push("Cargo.lock".into()); + } + if vcs_info.is_some() { + list.push(Path::new(VCS_INFO_FILE).to_path_buf()); + } + list.sort_unstable(); + for file in list.iter() { + println!("{}", file.display()); + } + return Ok(None); + } + + let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); + let dir = ws.target_dir().join("package"); + let mut dst = { + let tmp = format!(".{}", filename); + dir.open_rw(&tmp, config, "package scratch space")? + }; + + // Package up and test a temporary tarball and only move it to the final + // location if it actually passes all our tests. Any previously existing + // tarball can be assumed as corrupt or invalid, so we just blow it away if + // it exists. + config + .shell() + .status("Packaging", pkg.package_id().to_string())?; + dst.file().set_len(0)?; + tar(ws, &src_files, vcs_info.as_ref(), dst.file(), &filename) + .chain_err(|| failure::format_err!("failed to prepare local package for uploading"))?; + if opts.verify { + dst.seek(SeekFrom::Start(0))?; + run_verify(ws, &dst, opts).chain_err(|| "failed to verify package tarball")? + } + dst.seek(SeekFrom::Start(0))?; + { + let src_path = dst.path(); + let dst_path = dst.parent().join(&filename); + fs::rename(&src_path, &dst_path) + .chain_err(|| "failed to move temporary tarball into final location")?; + } + Ok(Some(dst)) +} + +fn include_lockfile(pkg: &Package) -> bool { + pkg.manifest().publish_lockfile() && pkg.targets().iter().any(|t| t.is_example() || t.is_bin()) +} + +// Checks that the package has some piece of metadata that a human can +// use to tell what the package is about. +fn check_metadata(pkg: &Package, config: &Config) -> CargoResult<()> { + let md = pkg.manifest().metadata(); + + let mut missing = vec![]; + + macro_rules! lacking { + ($( $($field: ident)||* ),*) => {{ + $( + if $(md.$field.as_ref().map_or(true, |s| s.is_empty()))&&* { + $(missing.push(stringify!($field).replace("_", "-"));)* + } + )* + }} + } + lacking!( + description, + license || license_file, + documentation || homepage || repository + ); + + if !missing.is_empty() { + let mut things = missing[..missing.len() - 1].join(", "); + // `things` will be empty if and only if its length is 1 (i.e., the only case + // to have no `or`). + if !things.is_empty() { + things.push_str(" or "); + } + things.push_str(missing.last().unwrap()); + + config.shell().warn(&format!( + "manifest has no {things}.\n\ + See for more info.", + things = things + ))? + } + Ok(()) +} + +// Checks that the package dependencies are safe to deploy. 
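+// (Illustrative aside, not from the upstream source.) The rejected shape is a
+// path dependency with no version requirement, e.g.:
+//
+//     [dependencies]
+//     my-helper = { path = "../my-helper" }
+//
+// which is fine locally but meaningless on the registry; adding
+// `version = "0.1"` next to `path` satisfies the `specified_req()` check
+// below. (`my-helper` is a hypothetical crate name.)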
+fn verify_dependencies(pkg: &Package) -> CargoResult<()> { + for dep in pkg.dependencies() { + if dep.source_id().is_path() && !dep.specified_req() { + failure::bail!( + "all path dependencies must have a version specified \ + when packaging.\ndependency `{}` does not specify \ + a version.", + dep.name_in_toml() + ) + } + } + Ok(()) +} + +// Checks if the package source is in a *git* DVCS repository. If *git*, and +// the source is *dirty* (e.g., has uncommited changes) and not `allow_dirty` +// then `bail!` with an informative message. Otherwise return the sha1 hash of +// the current *HEAD* commit, or `None` if *dirty*. +fn check_repo_state( + p: &Package, + src_files: &[PathBuf], + config: &Config, + allow_dirty: bool, +) -> CargoResult> { + if let Ok(repo) = git2::Repository::discover(p.root()) { + if let Some(workdir) = repo.workdir() { + debug!("found a git repo at {:?}", workdir); + let path = p.manifest_path(); + let path = path.strip_prefix(workdir).unwrap_or(path); + if let Ok(status) = repo.status_file(path) { + if (status & git2::Status::IGNORED).is_empty() { + debug!( + "found (git) Cargo.toml at {:?} in workdir {:?}", + path, workdir + ); + return git(p, src_files, &repo, allow_dirty); + } + } + config.shell().verbose(|shell| { + shell.warn(format!( + "No (git) Cargo.toml found at `{}` in workdir `{}`", + path.display(), + workdir.display() + )) + })?; + } + } else { + config.shell().verbose(|shell| { + shell.warn(format!("No (git) VCS found for `{}`", p.root().display())) + })?; + } + + // No VCS with a checked in `Cargo.toml` found, so we don't know if the + // directory is dirty or not, thus we have to assume that it's clean. + return Ok(None); + + fn git( + p: &Package, + src_files: &[PathBuf], + repo: &git2::Repository, + allow_dirty: bool, + ) -> CargoResult> { + let workdir = repo.workdir().unwrap(); + let dirty = src_files + .iter() + .filter(|file| { + let relative = file.strip_prefix(workdir).unwrap(); + if let Ok(status) = repo.status_file(relative) { + status != git2::Status::CURRENT + } else { + false + } + }) + .map(|path| { + path.strip_prefix(p.root()) + .unwrap_or(path) + .display() + .to_string() + }) + .collect::>(); + if dirty.is_empty() { + let rev_obj = repo.revparse_single("HEAD")?; + Ok(Some(rev_obj.id().to_string())) + } else { + if !allow_dirty { + failure::bail!( + "{} files in the working directory contain changes that were \ + not yet committed into git:\n\n{}\n\n\ + to proceed despite this, pass the `--allow-dirty` flag", + dirty.len(), + dirty.join("\n") + ) + } + Ok(None) + } + } +} + +// Checks for and `bail!` if a source file matches `ROOT/VCS_INFO_FILE`, since +// this is now a Cargo reserved file name, and we don't want to allow forgery. +fn check_vcs_file_collision(pkg: &Package, src_files: &[PathBuf]) -> CargoResult<()> { + let root = pkg.root(); + let vcs_info_path = Path::new(VCS_INFO_FILE); + let collision = src_files + .iter() + .find(|&p| p.strip_prefix(root).unwrap() == vcs_info_path); + if collision.is_some() { + failure::bail!( + "Invalid inclusion of reserved file name \ + {} in package source", + VCS_INFO_FILE + ); + } + Ok(()) +} + +fn tar( + ws: &Workspace<'_>, + src_files: &[PathBuf], + vcs_info: Option<&serde_json::Value>, + dst: &File, + filename: &str, +) -> CargoResult<()> { + // Prepare the encoder and its header. + let filename = Path::new(filename); + let encoder = GzBuilder::new() + .filename(util::path2bytes(filename)?) + .write(dst, Compression::best()); + + // Put all package files into a compressed archive. 
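+    // (Illustrative aside, not from the upstream source.) Entries are laid
+    // out under a `<name>-<version>/` prefix, mirroring how the registry
+    // unpacks crates; e.g. for `foo v0.1.0`:
+    //
+    //     foo-0.1.0/Cargo.toml        (rewritten, registry-normalized)
+    //     foo-0.1.0/Cargo.toml.orig   (the original manifest)
+    //     foo-0.1.0/src/lib.rs
+    //
+    // which is what the `format!("{}-{}{}{}", ...)` path construction below
+    // produces (shown with Unix-style separators; `foo` is hypothetical).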
+ let mut ar = Builder::new(encoder); + let pkg = ws.current()?; + let config = ws.config(); + let root = pkg.root(); + for file in src_files.iter() { + let relative = file.strip_prefix(root)?; + check_filename(relative)?; + let relative = relative.to_str().ok_or_else(|| { + failure::format_err!("non-utf8 path in source directory: {}", relative.display()) + })?; + config + .shell() + .verbose(|shell| shell.status("Archiving", &relative))?; + let path = format!( + "{}-{}{}{}", + pkg.name(), + pkg.version(), + path::MAIN_SEPARATOR, + relative + ); + + // The `tar::Builder` type by default will build GNU archives, but + // unfortunately we force it here to use UStar archives instead. The + // UStar format has more limitations on the length of path name that it + // can encode, so it's not quite as nice to use. + // + // Older cargos, however, had a bug where GNU archives were interpreted + // as UStar archives. This bug means that if we publish a GNU archive + // which has fully filled out metadata it'll be corrupt when unpacked by + // older cargos. + // + // Hopefully in the future after enough cargos have been running around + // with the bugfixed tar-rs library we'll be able to switch this over to + // GNU archives, but for now we'll just say that you can't encode paths + // in archives that are *too* long. + // + // For an instance of this in the wild, use the tar-rs 0.3.3 library to + // unpack the selectors 0.4.0 crate on crates.io. Either that or take a + // look at rust-lang/cargo#2326. + let mut header = Header::new_ustar(); + header + .set_path(&path) + .chain_err(|| format!("failed to add to archive: `{}`", relative))?; + let mut file = File::open(file) + .chain_err(|| format!("failed to open for archiving: `{}`", file.display()))?; + let metadata = file + .metadata() + .chain_err(|| format!("could not learn metadata for: `{}`", relative))?; + header.set_metadata(&metadata); + + if relative == "Cargo.toml" { + let orig = Path::new(&path).with_file_name("Cargo.toml.orig"); + header.set_path(&orig)?; + header.set_cksum(); + ar.append(&header, &mut file) + .chain_err(|| internal(format!("could not archive source file `{}`", relative)))?; + + let mut header = Header::new_ustar(); + let toml = pkg.to_registry_toml(ws.config())?; + header.set_path(&path)?; + header.set_entry_type(EntryType::file()); + header.set_mode(0o644); + header.set_size(toml.len() as u64); + header.set_cksum(); + ar.append(&header, toml.as_bytes()) + .chain_err(|| internal(format!("could not archive source file `{}`", relative)))?; + } else { + header.set_cksum(); + ar.append(&header, &mut file) + .chain_err(|| internal(format!("could not archive source file `{}`", relative)))?; + } + } + + if let Some(ref json) = vcs_info { + let filename: PathBuf = Path::new(VCS_INFO_FILE).into(); + debug_assert!(check_filename(&filename).is_ok()); + let fnd = filename.display(); + config + .shell() + .verbose(|shell| shell.status("Archiving", &fnd))?; + let path = format!( + "{}-{}{}{}", + pkg.name(), + pkg.version(), + path::MAIN_SEPARATOR, + fnd + ); + let mut header = Header::new_ustar(); + header + .set_path(&path) + .chain_err(|| format!("failed to add to archive: `{}`", fnd))?; + let json = format!("{}\n", serde_json::to_string_pretty(json)?); + let mut header = Header::new_ustar(); + header.set_path(&path)?; + header.set_entry_type(EntryType::file()); + header.set_mode(0o644); + header.set_size(json.len() as u64); + header.set_cksum(); + ar.append(&header, json.as_bytes()) + .chain_err(|| internal(format!("could not 
archive source file `{}`", fnd)))?; + } + + if include_lockfile(pkg) { + let toml = paths::read(&ws.root().join("Cargo.lock"))?; + let path = format!( + "{}-{}{}Cargo.lock", + pkg.name(), + pkg.version(), + path::MAIN_SEPARATOR + ); + let mut header = Header::new_ustar(); + header.set_path(&path)?; + header.set_entry_type(EntryType::file()); + header.set_mode(0o644); + header.set_size(toml.len() as u64); + header.set_cksum(); + ar.append(&header, toml.as_bytes()) + .chain_err(|| internal("could not archive source file `Cargo.lock`"))?; + } + + let encoder = ar.into_inner()?; + encoder.finish()?; + Ok(()) +} + +fn run_verify(ws: &Workspace<'_>, tar: &FileLock, opts: &PackageOpts<'_>) -> CargoResult<()> { + let config = ws.config(); + let pkg = ws.current()?; + + config.shell().status("Verifying", pkg)?; + + let f = GzDecoder::new(tar.file()); + let dst = tar + .parent() + .join(&format!("{}-{}", pkg.name(), pkg.version())); + if dst.exists() { + paths::remove_dir_all(&dst)?; + } + let mut archive = Archive::new(f); + // We don't need to set the Modified Time, as it's not relevant to verification + // and it errors on filesystems that don't support setting a modified timestamp + archive.set_preserve_mtime(false); + archive.unpack(dst.parent().unwrap())?; + + // Manufacture an ephemeral workspace to ensure that even if the top-level + // package has a workspace we can still build our new crate. + let id = SourceId::for_path(&dst)?; + let mut src = PathSource::new(&dst, id, ws.config()); + let new_pkg = src.root_package()?; + let pkg_fingerprint = src.last_modified_file(&new_pkg)?; + let ws = Workspace::ephemeral(new_pkg, config, None, true)?; + + let exec: Arc = Arc::new(DefaultExecutor); + ops::compile_ws( + &ws, + None, + &ops::CompileOptions { + config, + build_config: BuildConfig::new(config, opts.jobs, &opts.target, CompileMode::Build)?, + features: opts.features.clone(), + no_default_features: opts.no_default_features, + all_features: opts.all_features, + spec: ops::Packages::Packages(Vec::new()), + filter: ops::CompileFilter::Default { + required_features_filterable: true, + }, + target_rustdoc_args: None, + target_rustc_args: None, + local_rustdoc_args: None, + export_dir: None, + }, + &exec, + )?; + + // Check that `build.rs` didn't modify any files in the `src` directory. + let ws_fingerprint = src.last_modified_file(ws.current()?)?; + if pkg_fingerprint != ws_fingerprint { + let (_, path) = ws_fingerprint; + failure::bail!( + "Source directory was modified by build.rs during cargo publish. \ + Build scripts should not modify anything outside of OUT_DIR. \ + Modified file: {}\n\n\ + To proceed despite this, pass the `--no-verify` flag.", + path.display() + ) + } + + Ok(()) +} + +// It can often be the case that files of a particular name on one platform +// can't actually be created on another platform. For example files with colons +// in the name are allowed on Unix but not on Windows. +// +// To help out in situations like this, issue about weird filenames when +// packaging as a "heads up" that something may not work on other platforms. 
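+// (Illustrative aside, not from the upstream source.) For instance, a source
+// file named `src/aux:test.rs` packs fine on Unix, but the `:` makes it
+// unextractable on Windows, so the check below rejects it up front with:
+//
+//     cannot package a filename with a special character `:`: src/aux:test.rs
+//
+// (Path above is made up; the message shape matches the `bail!` in the
+// function that follows.)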
+fn check_filename(file: &Path) -> CargoResult<()> { + let name = match file.file_name() { + Some(name) => name, + None => return Ok(()), + }; + let name = match name.to_str() { + Some(name) => name, + None => failure::bail!( + "path does not have a unicode filename which may not unpack \ + on all platforms: {}", + file.display() + ), + }; + let bad_chars = ['/', '\\', '<', '>', ':', '"', '|', '?', '*']; + if let Some(c) = bad_chars.iter().find(|c| name.contains(**c)) { + failure::bail!( + "cannot package a filename with a special character `{}`: {}", + c, + file.display() + ) + } + Ok(()) +} diff --git a/src/cargo/ops/cargo_pkgid.rs b/src/cargo/ops/cargo_pkgid.rs new file mode 100644 index 000000000..56757bd58 --- /dev/null +++ b/src/cargo/ops/cargo_pkgid.rs @@ -0,0 +1,16 @@ +use crate::core::{PackageIdSpec, Workspace}; +use crate::ops; +use crate::util::CargoResult; + +pub fn pkgid(ws: &Workspace<'_>, spec: Option<&str>) -> CargoResult { + let resolve = match ops::load_pkg_lockfile(ws)? { + Some(resolve) => resolve, + None => failure::bail!("a Cargo.lock must exist for this command"), + }; + + let pkgid = match spec { + Some(spec) => PackageIdSpec::query_str(spec, resolve.iter())?, + None => ws.current()?.package_id(), + }; + Ok(PackageIdSpec::from_package_id(pkgid)) +} diff --git a/src/cargo/ops/cargo_read_manifest.rs b/src/cargo/ops/cargo_read_manifest.rs new file mode 100644 index 000000000..ddd594433 --- /dev/null +++ b/src/cargo/ops/cargo_read_manifest.rs @@ -0,0 +1,201 @@ +use std::collections::{HashMap, HashSet}; +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +use log::{info, trace}; + +use crate::core::{EitherManifest, Package, PackageId, SourceId}; +use crate::util::errors::CargoResult; +use crate::util::important_paths::find_project_manifest_exact; +use crate::util::toml::read_manifest; +use crate::util::{self, Config}; + +pub fn read_package( + path: &Path, + source_id: SourceId, + config: &Config, +) -> CargoResult<(Package, Vec)> { + trace!( + "read_package; path={}; source-id={}", + path.display(), + source_id + ); + let (manifest, nested) = read_manifest(path, source_id, config)?; + let manifest = match manifest { + EitherManifest::Real(manifest) => manifest, + EitherManifest::Virtual(..) 
=> failure::bail!( + "found a virtual manifest at `{}` instead of a package \ + manifest", + path.display() + ), + }; + + Ok((Package::new(manifest, path), nested)) +} + +pub fn read_packages( + path: &Path, + source_id: SourceId, + config: &Config, +) -> CargoResult> { + let mut all_packages = HashMap::new(); + let mut visited = HashSet::::new(); + let mut errors = Vec::::new(); + + trace!( + "looking for root package: {}, source_id={}", + path.display(), + source_id + ); + + walk(path, &mut |dir| { + trace!("looking for child package: {}", dir.display()); + + // Don't recurse into hidden/dot directories unless we're at the toplevel + if dir != path { + let name = dir.file_name().and_then(|s| s.to_str()); + if name.map(|s| s.starts_with('.')) == Some(true) { + return Ok(false); + } + + // Don't automatically discover packages across git submodules + if fs::metadata(&dir.join(".git")).is_ok() { + return Ok(false); + } + } + + // Don't ever look at target directories + if dir.file_name().and_then(|s| s.to_str()) == Some("target") + && has_manifest(dir.parent().unwrap()) + { + return Ok(false); + } + + if has_manifest(dir) { + read_nested_packages( + dir, + &mut all_packages, + source_id, + config, + &mut visited, + &mut errors, + )?; + } + Ok(true) + })?; + + if all_packages.is_empty() { + match errors.pop() { + Some(err) => Err(err), + None => Err(failure::format_err!( + "Could not find Cargo.toml in `{}`", + path.display() + )), + } + } else { + Ok(all_packages.into_iter().map(|(_, v)| v).collect()) + } +} + +fn walk(path: &Path, callback: &mut dyn FnMut(&Path) -> CargoResult) -> CargoResult<()> { + if !callback(path)? { + trace!("not processing {}", path.display()); + return Ok(()); + } + + // Ignore any permission denied errors because temporary directories + // can often have some weird permissions on them. + let dirs = match fs::read_dir(path) { + Ok(dirs) => dirs, + Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => return Ok(()), + Err(e) => { + let cx = format!("failed to read directory `{}`", path.display()); + let e = failure::Error::from(e); + return Err(e.context(cx).into()); + } + }; + for dir in dirs { + let dir = dir?; + if dir.file_type()?.is_dir() { + walk(&dir.path(), callback)?; + } + } + Ok(()) +} + +fn has_manifest(path: &Path) -> bool { + find_project_manifest_exact(path, "Cargo.toml").is_ok() +} + +fn read_nested_packages( + path: &Path, + all_packages: &mut HashMap, + source_id: SourceId, + config: &Config, + visited: &mut HashSet, + errors: &mut Vec, +) -> CargoResult<()> { + if !visited.insert(path.to_path_buf()) { + return Ok(()); + } + + let manifest_path = find_project_manifest_exact(path, "Cargo.toml")?; + + let (manifest, nested) = match read_manifest(&manifest_path, source_id, config) { + Err(err) => { + // Ignore malformed manifests found on git repositories + // + // git source try to find and read all manifests from the repository + // but since it's not possible to exclude folders from this search + // it's safer to ignore malformed manifests to avoid + // + // TODO: Add a way to exclude folders? + info!( + "skipping malformed package found at `{}`", + path.to_string_lossy() + ); + errors.push(err.into()); + return Ok(()); + } + Ok(tuple) => tuple, + }; + + let manifest = match manifest { + EitherManifest::Real(manifest) => manifest, + EitherManifest::Virtual(..) 
=> return Ok(()), + }; + let pkg = Package::new(manifest, &manifest_path); + + let pkg_id = pkg.package_id(); + use std::collections::hash_map::Entry; + match all_packages.entry(pkg_id) { + Entry::Vacant(v) => { + v.insert(pkg); + } + Entry::Occupied(_) => { + info!( + "skipping nested package `{}` found at `{}`", + pkg.name(), + path.to_string_lossy() + ); + } + } + + // Registry sources are not allowed to have `path=` dependencies because + // they're all translated to actual registry dependencies. + // + // We normalize the path here ensure that we don't infinitely walk around + // looking for crates. By normalizing we ensure that we visit this crate at + // most once. + // + // TODO: filesystem/symlink implications? + if !source_id.is_registry() { + for p in nested.iter() { + let path = util::normalize_path(&path.join(p)); + read_nested_packages(&path, all_packages, source_id, config, visited, errors)?; + } + } + + Ok(()) +} diff --git a/src/cargo/ops/cargo_run.rs b/src/cargo/ops/cargo_run.rs new file mode 100644 index 000000000..fe3bcea95 --- /dev/null +++ b/src/cargo/ops/cargo_run.rs @@ -0,0 +1,104 @@ +use std::iter; +use std::path::Path; + +use crate::core::{nightly_features_allowed, TargetKind, Workspace}; +use crate::ops; +use crate::util::{CargoResult, ProcessError}; + +pub fn run( + ws: &Workspace<'_>, + options: &ops::CompileOptions<'_>, + args: &[String], +) -> CargoResult> { + let config = ws.config(); + + // We compute the `bins` here *just for diagnosis*. The actual set of + // packages to be run is determined by the `ops::compile` call below. + let packages = options.spec.get_packages(ws)?; + let bins: Vec<_> = packages + .into_iter() + .flat_map(|pkg| { + iter::repeat(pkg).zip(pkg.manifest().targets().iter().filter(|target| { + !target.is_lib() + && !target.is_custom_build() + && if !options.filter.is_specific() { + target.is_bin() + } else { + options.filter.target_run(target) + } + })) + }) + .collect(); + + if bins.is_empty() { + if !options.filter.is_specific() { + failure::bail!("a bin target must be available for `cargo run`") + } else { + // This will be verified in `cargo_compile`. + } + } + + if bins.len() == 1 { + let target = bins[0].1; + if let TargetKind::ExampleLib(..) = target.kind() { + failure::bail!( + "example target `{}` is a library and cannot be executed", + target.name() + ) + } + } + + if bins.len() > 1 { + if !options.filter.is_specific() { + let names: Vec<&str> = bins + .into_iter() + .map(|(_pkg, target)| target.name()) + .collect(); + if nightly_features_allowed() { + failure::bail!( + "`cargo run` could not determine which binary to run. 
\ + Use the `--bin` option to specify a binary, \ + or (on nightly) the `default-run` manifest key.\n\ + available binaries: {}", + names.join(", ") + ) + } else { + failure::bail!( + "`cargo run` requires that a package only have one \ + executable; use the `--bin` option to specify which one \ + to run\navailable binaries: {}", + names.join(", ") + ) + } + } else { + failure::bail!( + "`cargo run` can run at most one executable, but \ + multiple were specified" + ) + } + } + + let compile = ops::compile(ws, options)?; + assert_eq!(compile.binaries.len(), 1); + let exe = &compile.binaries[0]; + let exe = match exe.strip_prefix(config.cwd()) { + Ok(path) if path.file_name() == Some(path.as_os_str()) => Path::new(".").join(path), + Ok(path) => path.to_path_buf(), + Err(_) => exe.to_path_buf(), + }; + let pkg = bins[0].0; + let mut process = compile.target_process(exe, pkg)?; + process.args(args).cwd(config.cwd()); + + config.shell().status("Running", process.to_string())?; + + let result = process.exec_replace(); + + match result { + Ok(()) => Ok(None), + Err(e) => { + let err = e.downcast::()?; + Ok(Some(err)) + } + } +} diff --git a/src/cargo/ops/cargo_test.rs b/src/cargo/ops/cargo_test.rs new file mode 100644 index 000000000..d335a80a8 --- /dev/null +++ b/src/cargo/ops/cargo_test.rs @@ -0,0 +1,201 @@ +use std::ffi::OsString; + +use crate::core::compiler::{Compilation, Doctest}; +use crate::core::Workspace; +use crate::ops; +use crate::util::errors::CargoResult; +use crate::util::{CargoTestError, ProcessError, Test}; + +pub struct TestOptions<'a> { + pub compile_opts: ops::CompileOptions<'a>, + pub no_run: bool, + pub no_fail_fast: bool, +} + +pub fn run_tests( + ws: &Workspace<'_>, + options: &TestOptions<'_>, + test_args: &[String], +) -> CargoResult> { + let compilation = compile_tests(ws, options)?; + + if options.no_run { + return Ok(None); + } + let (test, mut errors) = run_unit_tests(options, test_args, &compilation)?; + + // If we have an error and want to fail fast, then return. + if !errors.is_empty() && !options.no_fail_fast { + return Ok(Some(CargoTestError::new(test, errors))); + } + + let (doctest, docerrors) = run_doc_tests(options, test_args, &compilation)?; + let test = if docerrors.is_empty() { test } else { doctest }; + errors.extend(docerrors); + if errors.is_empty() { + Ok(None) + } else { + Ok(Some(CargoTestError::new(test, errors))) + } +} + +pub fn run_benches( + ws: &Workspace<'_>, + options: &TestOptions<'_>, + args: &[String], +) -> CargoResult> { + let mut args = args.to_vec(); + args.push("--bench".to_string()); + let compilation = compile_tests(ws, options)?; + + if options.no_run { + return Ok(None); + } + let (test, errors) = run_unit_tests(options, &args, &compilation)?; + match errors.len() { + 0 => Ok(None), + _ => Ok(Some(CargoTestError::new(test, errors))), + } +} + +fn compile_tests<'a>( + ws: &Workspace<'a>, + options: &TestOptions<'a>, +) -> CargoResult> { + let mut compilation = ops::compile(ws, &options.compile_opts)?; + compilation + .tests + .sort_by(|a, b| (a.0.package_id(), &a.1, &a.2).cmp(&(b.0.package_id(), &b.1, &b.2))); + Ok(compilation) +} + +/// Runs the unit and integration tests of a package. 
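+///
+/// A sketch of the expected call shape (illustrative only; `ws`, `options`,
+/// `test_args`, and `compilation` are assumed to be built as in `run_tests`
+/// above):
+///
+/// ```ignore
+/// let compilation = compile_tests(ws, options)?;
+/// let (test, errors) = run_unit_tests(options, test_args, &compilation)?;
+/// if !errors.is_empty() {
+///     return Ok(Some(CargoTestError::new(test, errors)));
+/// }
+/// ```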
+fn run_unit_tests( + options: &TestOptions<'_>, + test_args: &[String], + compilation: &Compilation<'_>, +) -> CargoResult<(Test, Vec)> { + let config = options.compile_opts.config; + let cwd = options.compile_opts.config.cwd(); + + let mut errors = Vec::new(); + + for &(ref pkg, ref kind, ref test, ref exe) in &compilation.tests { + let exe_display = exe.strip_prefix(cwd).unwrap_or(exe).display(); + let mut cmd = compilation.target_process(exe, pkg)?; + cmd.args(test_args); + config + .shell() + .concise(|shell| shell.status("Running", &exe_display))?; + config + .shell() + .verbose(|shell| shell.status("Running", &cmd))?; + + let result = cmd.exec(); + + match result { + Err(e) => { + let e = e.downcast::()?; + errors.push((kind.clone(), test.clone(), pkg.name().to_string(), e)); + if !options.no_fail_fast { + break; + } + } + Ok(()) => {} + } + } + + if errors.len() == 1 { + let (kind, name, pkg_name, e) = errors.pop().unwrap(); + Ok(( + Test::UnitTest { + kind, + name, + pkg_name, + }, + vec![e], + )) + } else { + Ok(( + Test::Multiple, + errors.into_iter().map(|(_, _, _, e)| e).collect(), + )) + } +} + +fn run_doc_tests( + options: &TestOptions<'_>, + test_args: &[String], + compilation: &Compilation<'_>, +) -> CargoResult<(Test, Vec)> { + let mut errors = Vec::new(); + let config = options.compile_opts.config; + + // We don't build/run doc tests if `target` does not equal `host`. + if compilation.host != compilation.target { + return Ok((Test::Doc, errors)); + } + + for doctest_info in &compilation.to_doc_test { + let Doctest { + package, + target, + deps, + } = doctest_info; + config.shell().status("Doc-tests", target.name())?; + let mut p = compilation.rustdoc_process(package, target)?; + p.arg("--test") + .arg(target.src_path().path().unwrap()) + .arg("--crate-name") + .arg(&target.crate_name()); + + for &rust_dep in &[&compilation.deps_output] { + let mut arg = OsString::from("dependency="); + arg.push(rust_dep); + p.arg("-L").arg(arg); + } + + for native_dep in compilation.native_dirs.iter() { + p.arg("-L").arg(native_dep); + } + + for &host_rust_dep in &[&compilation.host_deps_output] { + let mut arg = OsString::from("dependency="); + arg.push(host_rust_dep); + p.arg("-L").arg(arg); + } + + for arg in test_args { + p.arg("--test-args").arg(arg); + } + + if let Some(cfgs) = compilation.cfgs.get(&package.package_id()) { + for cfg in cfgs.iter() { + p.arg("--cfg").arg(cfg); + } + } + + for &(ref extern_crate_name, ref lib) in deps.iter() { + let mut arg = OsString::from(extern_crate_name); + arg.push("="); + arg.push(lib); + p.arg("--extern").arg(&arg); + } + + if let Some(flags) = compilation.rustdocflags.get(&package.package_id()) { + p.args(flags); + } + + config + .shell() + .verbose(|shell| shell.status("Running", p.to_string()))?; + if let Err(e) = p.exec() { + let e = e.downcast::()?; + errors.push(e); + if !options.no_fail_fast { + return Ok((Test::Doc, errors)); + } + } + } + Ok((Test::Doc, errors)) +} diff --git a/src/cargo/ops/cargo_uninstall.rs b/src/cargo/ops/cargo_uninstall.rs new file mode 100644 index 000000000..3ffa7dd05 --- /dev/null +++ b/src/cargo/ops/cargo_uninstall.rs @@ -0,0 +1,157 @@ +use std::collections::btree_map::Entry; +use std::{env, fs}; + +use crate::core::PackageId; +use crate::core::{PackageIdSpec, SourceId}; +use crate::ops::common_for_install_and_uninstall::*; +use crate::util::errors::CargoResult; +use crate::util::paths; +use crate::util::Config; +use crate::util::{FileLock, Filesystem}; + +pub fn uninstall( + root: Option<&str>, + specs: 
Vec<&str>, + bins: &[String], + config: &Config, +) -> CargoResult<()> { + if specs.len() > 1 && !bins.is_empty() { + failure::bail!("A binary can only be associated with a single installed package, specifying multiple specs with --bin is redundant."); + } + + let root = resolve_root(root, config)?; + let scheduled_error = if specs.len() == 1 { + uninstall_one(&root, specs[0], bins, config)?; + false + } else if specs.is_empty() { + uninstall_cwd(&root, bins, config)?; + false + } else { + let mut succeeded = vec![]; + let mut failed = vec![]; + for spec in specs { + let root = root.clone(); + match uninstall_one(&root, spec, bins, config) { + Ok(()) => succeeded.push(spec), + Err(e) => { + crate::handle_error(&e, &mut config.shell()); + failed.push(spec) + } + } + } + + let mut summary = vec![]; + if !succeeded.is_empty() { + summary.push(format!( + "Successfully uninstalled {}!", + succeeded.join(", ") + )); + } + if !failed.is_empty() { + summary.push(format!( + "Failed to uninstall {} (see error(s) above).", + failed.join(", ") + )); + } + + if !succeeded.is_empty() || !failed.is_empty() { + config.shell().status("Summary", summary.join(" "))?; + } + + !failed.is_empty() + }; + + if scheduled_error { + failure::bail!("some packages failed to uninstall"); + } + + Ok(()) +} + +pub fn uninstall_one( + root: &Filesystem, + spec: &str, + bins: &[String], + config: &Config, +) -> CargoResult<()> { + let crate_metadata = metadata(config, root)?; + let metadata = read_crate_list(&crate_metadata)?; + let pkgid = PackageIdSpec::query_str(spec, metadata.v1().keys().cloned())?; + uninstall_pkgid(&crate_metadata, metadata, pkgid, bins, config) +} + +fn uninstall_cwd(root: &Filesystem, bins: &[String], config: &Config) -> CargoResult<()> { + let crate_metadata = metadata(config, root)?; + let metadata = read_crate_list(&crate_metadata)?; + let source_id = SourceId::for_path(config.cwd())?; + let src = path_source(source_id, config)?; + let (pkg, _source) = select_pkg(src, None, None, config, true, &mut |path| { + path.read_packages() + })?; + let pkgid = pkg.package_id(); + uninstall_pkgid(&crate_metadata, metadata, pkgid, bins, config) +} + +fn uninstall_pkgid( + crate_metadata: &FileLock, + mut metadata: CrateListingV1, + pkgid: PackageId, + bins: &[String], + config: &Config, +) -> CargoResult<()> { + let mut to_remove = Vec::new(); + { + let mut installed = match metadata.v1_mut().entry(pkgid) { + Entry::Occupied(e) => e, + Entry::Vacant(..) 
=> failure::bail!("package `{}` is not installed", pkgid), + }; + + let dst = crate_metadata.parent().join("bin"); + for bin in installed.get() { + let bin = dst.join(bin); + if fs::metadata(&bin).is_err() { + failure::bail!( + "corrupt metadata, `{}` does not exist when it should", + bin.display() + ) + } + } + + let bins = bins + .iter() + .map(|s| { + if s.ends_with(env::consts::EXE_SUFFIX) { + s.to_string() + } else { + format!("{}{}", s, env::consts::EXE_SUFFIX) + } + }) + .collect::>(); + + for bin in bins.iter() { + if !installed.get().contains(bin) { + failure::bail!("binary `{}` not installed as part of `{}`", bin, pkgid) + } + } + + if bins.is_empty() { + to_remove.extend(installed.get().iter().map(|b| dst.join(b))); + installed.get_mut().clear(); + } else { + for bin in bins.iter() { + to_remove.push(dst.join(bin)); + installed.get_mut().remove(bin); + } + } + if installed.get().is_empty() { + installed.remove(); + } + } + write_crate_list(&crate_metadata, metadata)?; + for bin in to_remove { + config.shell().status("Removing", bin.display())?; + paths::remove_file(bin)?; + } + + Ok(()) +} diff --git a/src/cargo/ops/common_for_install_and_uninstall.rs b/src/cargo/ops/common_for_install_and_uninstall.rs new file mode 100644 index 000000000..6d739658c --- /dev/null +++ b/src/cargo/ops/common_for_install_and_uninstall.rs @@ -0,0 +1,251 @@ +use std::collections::{BTreeMap, BTreeSet}; +use std::env; +use std::io::prelude::*; +use std::io::SeekFrom; +use std::path::{Path, PathBuf}; + +use semver::VersionReq; +use serde::{Deserialize, Serialize}; + +use crate::core::PackageId; +use crate::core::{Dependency, Package, Source, SourceId}; +use crate::sources::PathSource; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{internal, Config, ToSemver}; +use crate::util::{FileLock, Filesystem}; + +#[derive(Deserialize, Serialize)] +#[serde(untagged)] +pub enum CrateListing { + V1(CrateListingV1), + Empty(Empty), +} + +#[derive(Deserialize, Serialize)] +#[serde(deny_unknown_fields)] +pub struct Empty {} + +#[derive(Deserialize, Serialize)] +pub struct CrateListingV1 { + v1: BTreeMap>, +} + +impl CrateListingV1 { + pub fn v1(&self) -> &BTreeMap> { + &self.v1 + } + + pub fn v1_mut(&mut self) -> &mut BTreeMap> { + &mut self.v1 + } +} + +pub fn resolve_root(flag: Option<&str>, config: &Config) -> CargoResult { + let config_root = config.get_path("install.root")?; + Ok(flag + .map(PathBuf::from) + .or_else(|| env::var_os("CARGO_INSTALL_ROOT").map(PathBuf::from)) + .or_else(move || config_root.map(|v| v.val)) + .map(Filesystem::new) + .unwrap_or_else(|| config.home().clone())) +} + +pub fn path_source<'a>(source_id: SourceId, config: &'a Config) -> CargoResult> { + let path = source_id + .url() + .to_file_path() + .map_err(|()| failure::format_err!("path sources must have a valid path"))?; + Ok(PathSource::new(&path, source_id, config)) +} + +pub fn select_pkg<'a, T>( + mut source: T, + name: Option<&str>, + vers: Option<&str>, + config: &Config, + needs_update: bool, + list_all: &mut dyn FnMut(&mut T) -> CargoResult>, +) -> CargoResult<(Package, Box)> +where + T: Source + 'a, +{ + if needs_update { + source.update()?; + } + + match name { + Some(name) => { + let vers = match vers { + Some(v) => { + // If the version begins with character <, >, =, ^, ~ parse it as a + // version range, otherwise parse it as a specific version + let first = v.chars().nth(0).ok_or_else(|| { + failure::format_err!("no version provided for the `--vers` flag") + })?; + + match first { + '<' | '>' 
| '=' | '^' | '~' => match v.parse::() { + Ok(v) => Some(v.to_string()), + Err(_) => failure::bail!( + "the `--vers` provided, `{}`, is \ + not a valid semver version requirement\n\n + Please have a look at \ + http://doc.crates.io/specifying-dependencies.html \ + for the correct format", + v + ), + }, + _ => match v.to_semver() { + Ok(v) => Some(format!("={}", v)), + Err(_) => { + let mut msg = format!( + "\ + the `--vers` provided, `{}`, is \ + not a valid semver version\n\n\ + historically Cargo treated this \ + as a semver version requirement \ + accidentally\nand will continue \ + to do so, but this behavior \ + will be removed eventually", + v + ); + + // If it is not a valid version but it is a valid version + // requirement, add a note to the warning + if v.parse::().is_ok() { + msg.push_str(&format!( + "\nif you want to specify semver range, \ + add an explicit qualifier, like ^{}", + v + )); + } + config.shell().warn(&msg)?; + Some(v.to_string()) + } + }, + } + } + None => None, + }; + let vers = vers.as_ref().map(|s| &**s); + let vers_spec = if vers.is_none() && source.source_id().is_registry() { + // Avoid pre-release versions from crate.io + // unless explicitly asked for + Some("*") + } else { + vers + }; + let dep = Dependency::parse_no_deprecated(name, vers_spec, source.source_id())?; + let deps = source.query_vec(&dep)?; + match deps.iter().map(|p| p.package_id()).max() { + Some(pkgid) => { + let pkg = Box::new(&mut source).download_now(pkgid, config)?; + Ok((pkg, Box::new(source))) + }, + None => { + let vers_info = vers + .map(|v| format!(" with version `{}`", v)) + .unwrap_or_default(); + failure::bail!( + "could not find `{}` in {}{}", + name, + source.source_id(), + vers_info + ) + } + } + } + None => { + let candidates = list_all(&mut source)?; + let binaries = candidates + .iter() + .filter(|cand| cand.targets().iter().filter(|t| t.is_bin()).count() > 0); + let examples = candidates + .iter() + .filter(|cand| cand.targets().iter().filter(|t| t.is_example()).count() > 0); + let pkg = match one(binaries, |v| multi_err("binaries", v))? { + Some(p) => p, + None => match one(examples, |v| multi_err("examples", v))? 
{
+                    Some(p) => p,
+                    None => failure::bail!(
+                        "no packages found with binaries or \
+                         examples"
+                    ),
+                },
+            };
+            return Ok((pkg.clone(), Box::new(source)));
+
+            fn multi_err(kind: &str, mut pkgs: Vec<&Package>) -> String {
+                pkgs.sort_unstable_by_key(|a| a.name());
+                format!(
+                    "multiple packages with {} found: {}",
+                    kind,
+                    pkgs.iter()
+                        .map(|p| p.name().as_str())
+                        .collect::<Vec<_>>()
+                        .join(", ")
+                )
+            }
+        }
+    }
+}
+
+pub fn one<I, F>(mut i: I, f: F) -> CargoResult<Option<I::Item>>
+where
+    I: Iterator,
+    F: FnOnce(Vec<I::Item>) -> String,
+{
+    match (i.next(), i.next()) {
+        (Some(i1), Some(i2)) => {
+            let mut v = vec![i1, i2];
+            v.extend(i);
+            Err(failure::format_err!("{}", f(v)))
+        }
+        (Some(i), None) => Ok(Some(i)),
+        (None, _) => Ok(None),
+    }
+}
+
+pub fn read_crate_list(file: &FileLock) -> CargoResult<CrateListingV1> {
+    let listing = (|| -> CargoResult<_> {
+        let mut contents = String::new();
+        file.file().read_to_string(&mut contents)?;
+        let listing =
+            toml::from_str(&contents).chain_err(|| internal("invalid TOML found for metadata"))?;
+        match listing {
+            CrateListing::V1(v1) => Ok(v1),
+            CrateListing::Empty(_) => Ok(CrateListingV1 {
+                v1: BTreeMap::new(),
+            }),
+        }
+    })()
+    .chain_err(|| {
+        failure::format_err!(
+            "failed to parse crate metadata at `{}`",
+            file.path().to_string_lossy()
+        )
+    })?;
+    Ok(listing)
+}
+
+pub fn write_crate_list(file: &FileLock, listing: CrateListingV1) -> CargoResult<()> {
+    (|| -> CargoResult<_> {
+        let mut file = file.file();
+        file.seek(SeekFrom::Start(0))?;
+        file.set_len(0)?;
+        let data = toml::to_string(&CrateListing::V1(listing))?;
+        file.write_all(data.as_bytes())?;
+        Ok(())
+    })()
+    .chain_err(|| {
+        failure::format_err!(
+            "failed to write crate metadata at `{}`",
+            file.path().to_string_lossy()
+        )
+    })?;
+    Ok(())
+}
+
+pub fn metadata(config: &Config, root: &Filesystem) -> CargoResult<FileLock> {
+    root.open_rw(Path::new(".crates.toml"), config, "crate metadata")
+}
diff --git a/src/cargo/ops/fix.rs b/src/cargo/ops/fix.rs
new file mode 100644
index 000000000..0d2bfbcee
--- /dev/null
+++ b/src/cargo/ops/fix.rs
@@ -0,0 +1,691 @@
+//! High-level overview of how `fix` works:
+//!
+//! The main goal is to run `cargo check` to get rustc to emit JSON
+//! diagnostics with suggested fixes that can be applied to the files on the
+//! filesystem, and validate that those changes didn't break anything.
+//!
+//! Cargo begins by launching a `LockServer` thread in the background to
+//! listen for network connections to coordinate locking when multiple targets
+//! are built simultaneously. It ensures each package has only one fix running
+//! at once.
+//!
+//! The `RustfixDiagnosticServer` is launched in a background thread (in
+//! `JobQueue`) to listen for network connections to coordinate displaying
+//! messages to the user on the console (so that multiple processes don't try
+//! to print at the same time).
+//!
+//! Cargo begins a normal `cargo check` operation with itself set as a proxy
+//! for rustc by setting `cargo_as_rustc_wrapper` in the build config. When
+//! cargo launches rustc to check a crate, it is actually launching itself.
+//! The `FIX_ENV` environment variable is set so that cargo knows it is in
+//! fix-proxy-mode. It also sets the `RUSTC` environment variable to the
+//! actual rustc so Cargo knows what to execute.
+//!
+//! Each proxied cargo-as-rustc detects it is in fix-proxy-mode (via `FIX_ENV`
+//! environment variable in `main`) and does the following:
+//!
+//! - Acquire a lock from the `LockServer` from the master cargo process.
+//!
- Launches the real rustc (`rustfix_and_fix`), looking at the JSON output +//! for suggested fixes. +//! - Uses the `rustfix` crate to apply the suggestions to the files on the +//! file system. +//! - If rustfix fails to apply any suggestions (for example, they are +//! overlapping), but at least some suggestions succeeded, it will try the +//! previous two steps up to 4 times as long as some suggestions succeed. +//! - Assuming there's at least one suggestion applied, and the suggestions +//! applied cleanly, rustc is run again to verify the suggestions didn't +//! break anything. The change will be backed out if it fails (unless +//! `--broken-code` is used). +//! - If there are any warnings or errors, rustc will be run one last time to +//! show them to the user. + +use std::collections::{BTreeSet, HashMap, HashSet}; +use std::env; +use std::ffi::OsString; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::{self, Command, ExitStatus}; +use std::str; + +use failure::{Error, ResultExt}; +use log::{debug, trace, warn}; +use rustfix::diagnostics::Diagnostic; +use rustfix::{self, CodeFix}; + +use crate::core::Workspace; +use crate::ops::{self, CompileOptions}; +use crate::util::diagnostic_server::{Message, RustfixDiagnosticServer}; +use crate::util::errors::CargoResult; +use crate::util::paths; +use crate::util::{existing_vcs_repo, LockServer, LockServerClient}; + +const FIX_ENV: &str = "__CARGO_FIX_PLZ"; +const BROKEN_CODE_ENV: &str = "__CARGO_FIX_BROKEN_CODE"; +const PREPARE_FOR_ENV: &str = "__CARGO_FIX_PREPARE_FOR"; +const EDITION_ENV: &str = "__CARGO_FIX_EDITION"; +const IDIOMS_ENV: &str = "__CARGO_FIX_IDIOMS"; + +pub struct FixOptions<'a> { + pub edition: bool, + pub prepare_for: Option<&'a str>, + pub idioms: bool, + pub compile_opts: CompileOptions<'a>, + pub allow_dirty: bool, + pub allow_no_vcs: bool, + pub allow_staged: bool, + pub broken_code: bool, +} + +pub fn fix(ws: &Workspace<'_>, opts: &mut FixOptions<'_>) -> CargoResult<()> { + check_version_control(opts)?; + + // Spin up our lock server, which our subprocesses will use to synchronize fixes. 
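+    // A rough sketch of the handshake (assuming the `LockServer` /
+    // `LockServerClient` API used in this module):
+    //
+    //     let server = LockServer::new()?;           // parent cargo process
+    //     let addr = server.addr().to_string();      // exported via FIX_ENV
+    //     let _started = server.start()?;            // background listener
+    //     // ...later, in each cargo-as-rustc child process:
+    //     let _lock = LockServerClient::lock(&addr.parse()?, "some-name")?;
+    //
+    // ("some-name" is a placeholder; the real code locks on the manifest
+    // directory, see `rustfix_crate` below.)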
+ let lock_server = LockServer::new()?; + opts.compile_opts + .build_config + .extra_rustc_env + .push((FIX_ENV.to_string(), lock_server.addr().to_string())); + let _started = lock_server.start()?; + + opts.compile_opts.build_config.force_rebuild = true; + + if opts.broken_code { + let key = BROKEN_CODE_ENV.to_string(); + opts.compile_opts + .build_config + .extra_rustc_env + .push((key, "1".to_string())); + } + + if opts.edition { + let key = EDITION_ENV.to_string(); + opts.compile_opts + .build_config + .extra_rustc_env + .push((key, "1".to_string())); + } else if let Some(edition) = opts.prepare_for { + opts.compile_opts + .build_config + .extra_rustc_env + .push((PREPARE_FOR_ENV.to_string(), edition.to_string())); + } + if opts.idioms { + opts.compile_opts + .build_config + .extra_rustc_env + .push((IDIOMS_ENV.to_string(), "1".to_string())); + } + opts.compile_opts.build_config.cargo_as_rustc_wrapper = true; + *opts + .compile_opts + .build_config + .rustfix_diagnostic_server + .borrow_mut() = Some(RustfixDiagnosticServer::new()?); + + ops::compile(ws, &opts.compile_opts)?; + Ok(()) +} + +fn check_version_control(opts: &FixOptions<'_>) -> CargoResult<()> { + if opts.allow_no_vcs { + return Ok(()); + } + let config = opts.compile_opts.config; + if !existing_vcs_repo(config.cwd(), config.cwd()) { + failure::bail!( + "no VCS found for this package and `cargo fix` can potentially \ + perform destructive changes; if you'd like to suppress this \ + error pass `--allow-no-vcs`" + ) + } + + if opts.allow_dirty && opts.allow_staged { + return Ok(()); + } + + let mut dirty_files = Vec::new(); + let mut staged_files = Vec::new(); + if let Ok(repo) = git2::Repository::discover(config.cwd()) { + let mut repo_opts = git2::StatusOptions::new(); + repo_opts.include_ignored(false); + for status in repo.statuses(Some(&mut repo_opts))?.iter() { + if let Some(path) = status.path() { + match status.status() { + git2::Status::CURRENT => (), + git2::Status::INDEX_NEW + | git2::Status::INDEX_MODIFIED + | git2::Status::INDEX_DELETED + | git2::Status::INDEX_RENAMED + | git2::Status::INDEX_TYPECHANGE => { + if !opts.allow_staged { + staged_files.push(path.to_string()) + } + } + _ => { + if !opts.allow_dirty { + dirty_files.push(path.to_string()) + } + } + }; + } + } + } + + if dirty_files.is_empty() && staged_files.is_empty() { + return Ok(()); + } + + let mut files_list = String::new(); + for file in dirty_files { + files_list.push_str(" * "); + files_list.push_str(&file); + files_list.push_str(" (dirty)\n"); + } + for file in staged_files { + files_list.push_str(" * "); + files_list.push_str(&file); + files_list.push_str(" (staged)\n"); + } + + failure::bail!( + "the working directory of this package has uncommitted changes, and \ + `cargo fix` can potentially perform destructive changes; if you'd \ + like to suppress this error pass `--allow-dirty`, `--allow-staged`, \ + or commit the changes to these files:\n\ + \n\ + {}\n\ + ", + files_list + ); +} + +pub fn fix_maybe_exec_rustc() -> CargoResult { + let lock_addr = match env::var(FIX_ENV) { + Ok(s) => s, + Err(_) => return Ok(false), + }; + + let args = FixArgs::get(); + trace!("cargo-fix as rustc got file {:?}", args.file); + let rustc = env::var_os("RUSTC").expect("failed to find RUSTC env var"); + + // Our goal is to fix only the crates that the end user is interested in. + // That's very likely to only mean the crates in the workspace the user is + // working on, not random crates.io crates. 
+    //
+    // To that end we only actually try to fix things if it looks like we're
+    // compiling a Rust file and it *doesn't* have an absolute filename. That's
+    // not the best heuristic but matches what Cargo does today at least.
+    let mut fixes = FixedCrate::default();
+    if let Some(path) = &args.file {
+        if args.primary_package {
+            trace!("start rustfixing {:?}", path);
+            fixes = rustfix_crate(&lock_addr, rustc.as_ref(), path, &args)?;
+        }
+    }
+
+    // Ok now we have our final goal of testing out the changes that we applied.
+    // If these changes went awry and actually started to cause the crate to
+    // *stop* compiling then we want to back them out and continue to print
+    // warnings to the user.
+    //
+    // If we didn't actually make any changes then we can immediately execute the
+    // new rustc, and otherwise we capture the output to hide it in the scenario
+    // that we have to back it all out.
+    if !fixes.files.is_empty() {
+        let mut cmd = Command::new(&rustc);
+        args.apply(&mut cmd);
+        cmd.arg("--error-format=json");
+        let output = cmd.output().context("failed to spawn rustc")?;
+
+        if output.status.success() {
+            for (path, file) in fixes.files.iter() {
+                Message::Fixing {
+                    file: path.clone(),
+                    fixes: file.fixes_applied,
+                }
+                .post()?;
+            }
+        }
+
+        // If we succeeded then we'll want to commit to the changes we made, if
+        // any. If stderr is empty then there's no need for the final exec at
+        // the end, we just bail out here.
+        if output.status.success() && output.stderr.is_empty() {
+            return Ok(true);
+        }
+
+        // Otherwise, if our rustc just failed, then that means that we broke the
+        // user's code with our changes. Back out everything and fall through
+        // below to recompile again.
+        if !output.status.success() {
+            if env::var_os(BROKEN_CODE_ENV).is_none() {
+                for (path, file) in fixes.files.iter() {
+                    fs::write(path, &file.original_code)
+                        .with_context(|_| format!("failed to write file `{}`", path))?;
+                }
+            }
+            log_failed_fix(&output.stderr)?;
+        }
+    }
+
+    let mut cmd = Command::new(&rustc);
+    args.apply(&mut cmd);
+    exit_with(cmd.status().context("failed to spawn rustc")?);
+}
+
+#[derive(Default)]
+struct FixedCrate {
+    files: HashMap<String, FixedFile>,
+}
+
+struct FixedFile {
+    errors_applying_fixes: Vec<String>,
+    fixes_applied: u32,
+    original_code: String,
+}
+
+fn rustfix_crate(
+    lock_addr: &str,
+    rustc: &Path,
+    filename: &Path,
+    args: &FixArgs,
+) -> Result<FixedCrate, Error> {
+    args.verify_not_preparing_for_enabled_edition()?;
+
+    // First up, we want to make sure that each crate is only checked by one
+    // process at a time. If two invocations concurrently check a crate then
+    // it's likely to corrupt it.
+    //
+    // We currently do this by assigning the name on our lock to the manifest
+    // directory.
+    let dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR is missing?");
+    let _lock = LockServerClient::lock(&lock_addr.parse()?, dir)?;
+
+    // Next up, this is a bit suspicious, but we *iteratively* execute rustc and
+    // collect suggestions to feed to rustfix. Once we hit our limit of times to
+    // execute rustc or we appear to be reaching a fixed point we stop running
+    // rustc.
+    //
+    // This is currently done to handle code like:
+    //
+    //     ::foo::<::Bar>();
+    //
+    // where there are two fixes to happen here: `crate::foo::<crate::Bar>()`.
+    // The spans for these two suggestions are overlapping and it's difficult in
+    // the compiler to **not** have overlapping spans here.
As a result, a naive + // implementation would feed the two compiler suggestions for the above fix + // into `rustfix`, but one would be rejected because it overlaps with the + // other. + // + // In this case though, both suggestions are valid and can be automatically + // applied! To handle this case we execute rustc multiple times, collecting + // fixes each time we do so. Along the way we discard any suggestions that + // failed to apply, assuming that they can be fixed the next time we run + // rustc. + // + // Naturally, we want a few protections in place here though to avoid looping + // forever or otherwise losing data. To that end we have a few termination + // conditions: + // + // * Do this whole process a fixed number of times. In theory we probably + // need an infinite number of times to apply fixes, but we're not gonna + // sit around waiting for that. + // * If it looks like a fix genuinely can't be applied we need to bail out. + // Detect this when a fix fails to get applied *and* no suggestions + // successfully applied to the same file. In that case looks like we + // definitely can't make progress, so bail out. + let mut fixes = FixedCrate::default(); + let mut last_fix_counts = HashMap::new(); + let iterations = env::var("CARGO_FIX_MAX_RETRIES") + .ok() + .and_then(|n| n.parse().ok()) + .unwrap_or(4); + for _ in 0..iterations { + last_fix_counts.clear(); + for (path, file) in fixes.files.iter_mut() { + last_fix_counts.insert(path.clone(), file.fixes_applied); + // We'll generate new errors below. + file.errors_applying_fixes.clear(); + } + rustfix_and_fix(&mut fixes, rustc, filename, args)?; + let mut progress_yet_to_be_made = false; + for (path, file) in fixes.files.iter_mut() { + if file.errors_applying_fixes.is_empty() { + continue; + } + // If anything was successfully fixed *and* there's at least one + // error, then assume the error was spurious and we'll try again on + // the next iteration. + if file.fixes_applied != *last_fix_counts.get(path).unwrap_or(&0) { + progress_yet_to_be_made = true; + } + } + if !progress_yet_to_be_made { + break; + } + } + + // Any errors still remaining at this point need to be reported as probably + // bugs in Cargo and/or rustfix. + for (path, file) in fixes.files.iter_mut() { + for error in file.errors_applying_fixes.drain(..) { + Message::ReplaceFailed { + file: path.clone(), + message: error, + } + .post()?; + } + } + + Ok(fixes) +} + +/// Executes `rustc` to apply one round of suggestions to the crate in question. +/// +/// This will fill in the `fixes` map with original code, suggestions applied, +/// and any errors encountered while fixing files. +fn rustfix_and_fix( + fixes: &mut FixedCrate, + rustc: &Path, + filename: &Path, + args: &FixArgs, +) -> Result<(), Error> { + // If not empty, filter by these lints. + // TODO: implement a way to specify this. + let only = HashSet::new(); + + let mut cmd = Command::new(rustc); + cmd.arg("--error-format=json"); + args.apply(&mut cmd); + let output = cmd + .output() + .with_context(|_| format!("failed to execute `{}`", rustc.display()))?; + + // If rustc didn't succeed for whatever reasons then we're very likely to be + // looking at otherwise broken code. Let's not make things accidentally + // worse by applying fixes where a bug could cause *more* broken code. + // Instead, punt upwards which will reexec rustc over the original code, + // displaying pretty versions of the diagnostics we just read out. 
+ if !output.status.success() && env::var_os(BROKEN_CODE_ENV).is_none() { + debug!( + "rustfixing `{:?}` failed, rustc exited with {:?}", + filename, + output.status.code() + ); + return Ok(()); + } + + let fix_mode = env::var_os("__CARGO_FIX_YOLO") + .map(|_| rustfix::Filter::Everything) + .unwrap_or(rustfix::Filter::MachineApplicableOnly); + + // Sift through the output of the compiler to look for JSON messages. + // indicating fixes that we can apply. + let stderr = str::from_utf8(&output.stderr).context("failed to parse rustc stderr as UTF-8")?; + + let suggestions = stderr + .lines() + .filter(|x| !x.is_empty()) + .inspect(|y| trace!("line: {}", y)) + // Parse each line of stderr, ignoring errors, as they may not all be JSON. + .filter_map(|line| serde_json::from_str::(line).ok()) + // From each diagnostic, try to extract suggestions from rustc. + .filter_map(|diag| rustfix::collect_suggestions(&diag, &only, fix_mode)); + + // Collect suggestions by file so we can apply them one at a time later. + let mut file_map = HashMap::new(); + let mut num_suggestion = 0; + for suggestion in suggestions { + trace!("suggestion"); + // Make sure we've got a file associated with this suggestion and all + // snippets point to the same file. Right now it's not clear what + // we would do with multiple files. + let file_names = suggestion + .solutions + .iter() + .flat_map(|s| s.replacements.iter()) + .map(|r| &r.snippet.file_name); + + let file_name = if let Some(file_name) = file_names.clone().next() { + file_name.clone() + } else { + trace!("rejecting as it has no solutions {:?}", suggestion); + continue; + }; + + if !file_names.clone().all(|f| f == &file_name) { + trace!("rejecting as it changes multiple files: {:?}", suggestion); + continue; + } + + file_map + .entry(file_name) + .or_insert_with(Vec::new) + .push(suggestion); + num_suggestion += 1; + } + + debug!( + "collected {} suggestions for `{}`", + num_suggestion, + filename.display(), + ); + + for (file, suggestions) in file_map { + // Attempt to read the source code for this file. If this fails then + // that'd be pretty surprising, so log a message and otherwise keep + // going. + let code = match paths::read(file.as_ref()) { + Ok(s) => s, + Err(e) => { + warn!("failed to read `{}`: {}", file, e); + continue; + } + }; + let num_suggestions = suggestions.len(); + debug!("applying {} fixes to {}", num_suggestions, file); + + // If this file doesn't already exist then we just read the original + // code, so save it. If the file already exists then the original code + // doesn't need to be updated as we've just read an interim state with + // some fixes but perhaps not all. + let fixed_file = fixes + .files + .entry(file.clone()) + .or_insert_with(|| FixedFile { + errors_applying_fixes: Vec::new(), + fixes_applied: 0, + original_code: code.clone(), + }); + let mut fixed = CodeFix::new(&code); + + // As mentioned above in `rustfix_crate`, we don't immediately warn + // about suggestions that fail to apply here, and instead we save them + // off for later processing. + for suggestion in suggestions.iter().rev() { + match fixed.apply(suggestion) { + Ok(()) => fixed_file.fixes_applied += 1, + Err(e) => fixed_file.errors_applying_fixes.push(e.to_string()), + } + } + let new_code = fixed.finish()?; + fs::write(&file, new_code).with_context(|_| format!("failed to write file `{}`", file))?; + } + + Ok(()) +} + +fn exit_with(status: ExitStatus) -> ! 
{
+    #[cfg(unix)]
+    {
+        use std::os::unix::prelude::*;
+        if let Some(signal) = status.signal() {
+            eprintln!("child failed with signal `{}`", signal);
+            process::exit(2);
+        }
+    }
+    process::exit(status.code().unwrap_or(3));
+}
+
+fn log_failed_fix(stderr: &[u8]) -> Result<(), Error> {
+    let stderr = str::from_utf8(stderr).context("failed to parse rustc stderr as utf-8")?;
+
+    let diagnostics = stderr
+        .lines()
+        .filter(|x| !x.is_empty())
+        .filter_map(|line| serde_json::from_str::<Diagnostic>(line).ok());
+    let mut files = BTreeSet::new();
+    let mut errors = Vec::new();
+    for diagnostic in diagnostics {
+        errors.push(diagnostic.rendered.unwrap_or(diagnostic.message));
+        for span in diagnostic.spans.into_iter() {
+            files.insert(span.file_name);
+        }
+    }
+    let mut krate = None;
+    let mut prev_dash_dash_krate_name = false;
+    for arg in env::args() {
+        if prev_dash_dash_krate_name {
+            krate = Some(arg.clone());
+        }
+
+        if arg == "--crate-name" {
+            prev_dash_dash_krate_name = true;
+        } else {
+            prev_dash_dash_krate_name = false;
+        }
+    }
+
+    let files = files.into_iter().collect();
+    Message::FixFailed {
+        files,
+        krate,
+        errors,
+    }
+    .post()?;
+
+    Ok(())
+}
+
+#[derive(Default)]
+struct FixArgs {
+    file: Option<PathBuf>,
+    prepare_for_edition: PrepareFor,
+    idioms: bool,
+    enabled_edition: Option<String>,
+    other: Vec<OsString>,
+    primary_package: bool,
+}
+
+enum PrepareFor {
+    Next,
+    Edition(String),
+    None,
+}
+
+impl Default for PrepareFor {
+    fn default() -> PrepareFor {
+        PrepareFor::None
+    }
+}
+
+impl FixArgs {
+    fn get() -> FixArgs {
+        let mut ret = FixArgs::default();
+        for arg in env::args_os().skip(1) {
+            let path = PathBuf::from(arg);
+            if path.extension().and_then(|s| s.to_str()) == Some("rs") && path.exists() {
+                ret.file = Some(path);
+                continue;
+            }
+            if let Some(s) = path.to_str() {
+                let prefix = "--edition=";
+                if s.starts_with(prefix) {
+                    ret.enabled_edition = Some(s[prefix.len()..].to_string());
+                    continue;
+                }
+            }
+            ret.other.push(path.into());
+        }
+        if let Ok(s) = env::var(PREPARE_FOR_ENV) {
+            ret.prepare_for_edition = PrepareFor::Edition(s);
+        } else if env::var(EDITION_ENV).is_ok() {
+            ret.prepare_for_edition = PrepareFor::Next;
+        }
+        ret.idioms = env::var(IDIOMS_ENV).is_ok();
+        ret.primary_package = env::var("CARGO_PRIMARY_PACKAGE").is_ok();
+        ret
+    }
+
+    fn apply(&self, cmd: &mut Command) {
+        if let Some(path) = &self.file {
+            cmd.arg(path);
+        }
+        cmd.args(&self.other).arg("--cap-lints=warn");
+        if let Some(edition) = &self.enabled_edition {
+            cmd.arg("--edition").arg(edition);
+            if self.idioms && self.primary_package && edition == "2018" {
+                cmd.arg("-Wrust-2018-idioms");
+            }
+        }
+        if self.primary_package {
+            if let Some(edition) = self.prepare_for_edition_resolve() {
+                cmd.arg("-W").arg(format!("rust-{}-compatibility", edition));
+            }
+        }
+    }
+
+    /// Verifies that we're not both preparing for an enabled edition and enabling
+    /// the edition.
+    ///
+    /// This indicates that `cargo fix --prepare-for` is being executed out of
+    /// order with enabling the edition itself, meaning that we wouldn't
+    /// actually be able to fix anything! If it looks like this is happening
+    /// then yield an error to the user, indicating that this is happening.
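+    ///
+    /// For instance (hypothetical values): running `cargo fix --prepare-for
+    /// 2018` on a crate that is already being compiled with `--edition=2018`
+    /// resolves to the same edition on both sides, so the check below posts
+    /// `EditionAlreadyEnabled` and exits instead of silently fixing nothing.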
+ fn verify_not_preparing_for_enabled_edition(&self) -> CargoResult<()> { + let edition = match self.prepare_for_edition_resolve() { + Some(s) => s, + None => return Ok(()), + }; + let enabled = match &self.enabled_edition { + Some(s) => s, + None => return Ok(()), + }; + if edition != enabled { + return Ok(()); + } + let path = match &self.file { + Some(s) => s, + None => return Ok(()), + }; + + Message::EditionAlreadyEnabled { + file: path.display().to_string(), + edition: edition.to_string(), + } + .post()?; + + process::exit(1); + } + + fn prepare_for_edition_resolve(&self) -> Option<&str> { + match &self.prepare_for_edition { + PrepareFor::Edition(s) => Some(s), + PrepareFor::Next => Some(self.next_edition()), + PrepareFor::None => None, + } + } + + fn next_edition(&self) -> &str { + match self.enabled_edition.as_ref().map(|s| &**s) { + // 2015 -> 2018, + None | Some("2015") => "2018", + + // This'll probably be wrong in 2020, but that's future Cargo's + // problem. Eventually though we'll just add more editions here as + // necessary. + _ => "2018", + } + } +} diff --git a/src/cargo/ops/lockfile.rs b/src/cargo/ops/lockfile.rs new file mode 100644 index 000000000..9ea513946 --- /dev/null +++ b/src/cargo/ops/lockfile.rs @@ -0,0 +1,190 @@ +use std::io::prelude::*; + +use toml; + +use crate::core::resolver::WorkspaceResolve; +use crate::core::{resolver, Resolve, Workspace}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::toml as cargo_toml; +use crate::util::Filesystem; + +pub fn load_pkg_lockfile(ws: &Workspace<'_>) -> CargoResult> { + if !ws.root().join("Cargo.lock").exists() { + return Ok(None); + } + + let root = Filesystem::new(ws.root().to_path_buf()); + let mut f = root.open_ro("Cargo.lock", ws.config(), "Cargo.lock file")?; + + let mut s = String::new(); + f.read_to_string(&mut s) + .chain_err(|| format!("failed to read file: {}", f.path().display()))?; + + let resolve = (|| -> CargoResult> { + let resolve: toml::Value = cargo_toml::parse(&s, f.path(), ws.config())?; + let v: resolver::EncodableResolve = resolve.try_into()?; + Ok(Some(v.into_resolve(ws)?)) + })() + .chain_err(|| format!("failed to parse lock file at: {}", f.path().display()))?; + Ok(resolve) +} + +pub fn write_pkg_lockfile(ws: &Workspace<'_>, resolve: &Resolve) -> CargoResult<()> { + // Load the original lock file if it exists. + let ws_root = Filesystem::new(ws.root().to_path_buf()); + let orig = ws_root.open_ro("Cargo.lock", ws.config(), "Cargo.lock file"); + let orig = orig.and_then(|mut f| { + let mut s = String::new(); + f.read_to_string(&mut s)?; + Ok(s) + }); + + let toml = toml::Value::try_from(WorkspaceResolve { ws, resolve }).unwrap(); + + let mut out = String::new(); + + // At the start of the file we notify the reader that the file is generated. + // Specifically Phabricator ignores files containing "@generated", so we use that. 
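+    // The two-line header written below thus renders as:
+    //
+    //     # This file is automatically @generated by Cargo.
+    //     # It is not intended for manual editing.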
+ let marker_line = "# This file is automatically @generated by Cargo."; + let extra_line = "# It is not intended for manual editing."; + out.push_str(marker_line); + out.push('\n'); + out.push_str(extra_line); + out.push('\n'); + // and preserve any other top comments + if let Ok(orig) = &orig { + let mut comments = orig.lines().take_while(|line| line.starts_with('#')); + if let Some(first) = comments.next() { + if first != marker_line { + out.push_str(first); + out.push('\n'); + } + if let Some(second) = comments.next() { + if second != extra_line { + out.push_str(second); + out.push('\n'); + } + for line in comments { + out.push_str(line); + out.push('\n'); + } + } + } + } + + let deps = toml["package"].as_array().unwrap(); + for dep in deps.iter() { + let dep = dep.as_table().unwrap(); + + out.push_str("[[package]]\n"); + emit_package(dep, &mut out); + } + + if let Some(patch) = toml.get("patch") { + let list = patch["unused"].as_array().unwrap(); + for entry in list { + out.push_str("[[patch.unused]]\n"); + emit_package(entry.as_table().unwrap(), &mut out); + out.push_str("\n"); + } + } + + if let Some(meta) = toml.get("metadata") { + out.push_str("[metadata]\n"); + out.push_str(&meta.to_string()); + } + + // If the lock file contents haven't changed so don't rewrite it. This is + // helpful on read-only filesystems. + if let Ok(orig) = orig { + if are_equal_lockfiles(orig, &out, ws) { + return Ok(()); + } + } + + if !ws.config().lock_update_allowed() { + if ws.config().cli_unstable().offline { + failure::bail!("can't update in the offline mode"); + } + + let flag = if ws.config().network_allowed() { + "--locked" + } else { + "--frozen" + }; + failure::bail!( + "the lock file {} needs to be updated but {} was passed to \ + prevent this", + ws.root().to_path_buf().join("Cargo.lock").display(), + flag + ); + } + + // Ok, if that didn't work just write it out + ws_root + .open_rw("Cargo.lock", ws.config(), "Cargo.lock file") + .and_then(|mut f| { + f.file().set_len(0)?; + f.write_all(out.as_bytes())?; + Ok(()) + }) + .chain_err(|| format!("failed to write {}", ws.root().join("Cargo.lock").display()))?; + Ok(()) +} + +fn are_equal_lockfiles(mut orig: String, current: &str, ws: &Workspace<'_>) -> bool { + if has_crlf_line_endings(&orig) { + orig = orig.replace("\r\n", "\n"); + } + + // If we want to try and avoid updating the lock file, parse both and + // compare them; since this is somewhat expensive, don't do it in the + // common case where we can update lock files. + if !ws.config().lock_update_allowed() { + let res: CargoResult = (|| { + let old: resolver::EncodableResolve = toml::from_str(&orig)?; + let new: resolver::EncodableResolve = toml::from_str(current)?; + Ok(old.into_resolve(ws)? == new.into_resolve(ws)?) + })(); + if let Ok(true) = res { + return true; + } + } + + current == orig +} + +fn has_crlf_line_endings(s: &str) -> bool { + // Only check the first line. 
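+    // e.g. (illustrative): has_crlf_line_endings("a\r\nb") == true,
+    //      while has_crlf_line_endings("a\nb") == false.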
+ if let Some(lf) = s.find('\n') { + s[..lf].ends_with('\r') + } else { + false + } +} + +fn emit_package(dep: &toml::value::Table, out: &mut String) { + out.push_str(&format!("name = {}\n", &dep["name"])); + out.push_str(&format!("version = {}\n", &dep["version"])); + + if dep.contains_key("source") { + out.push_str(&format!("source = {}\n", &dep["source"])); + } + + if let Some(s) = dep.get("dependencies") { + let slice = s.as_array().unwrap(); + + if !slice.is_empty() { + out.push_str("dependencies = [\n"); + + for child in slice.iter() { + out.push_str(&format!(" {},\n", child)); + } + + out.push_str("]\n"); + } + out.push_str("\n"); + } else if dep.contains_key("replace") { + out.push_str(&format!("replace = {}\n\n", &dep["replace"])); + } +} diff --git a/src/cargo/ops/mod.rs b/src/cargo/ops/mod.rs new file mode 100644 index 000000000..f12f66bef --- /dev/null +++ b/src/cargo/ops/mod.rs @@ -0,0 +1,48 @@ +pub use self::cargo_clean::{clean, CleanOptions}; +pub use self::cargo_compile::{compile, compile_with_exec, compile_ws, CompileOptions}; +pub use self::cargo_compile::{CompileFilter, FilterRule, Packages}; +pub use self::cargo_doc::{doc, DocOptions}; +pub use self::cargo_fetch::{fetch, FetchOptions}; +pub use self::cargo_generate_lockfile::generate_lockfile; +pub use self::cargo_generate_lockfile::update_lockfile; +pub use self::cargo_generate_lockfile::UpdateOptions; +pub use self::cargo_install::{install, install_list}; +pub use self::cargo_new::{init, new, NewOptions, VersionControl}; +pub use self::cargo_output_metadata::{output_metadata, ExportInfo, OutputMetadataOptions}; +pub use self::cargo_package::{package, PackageOpts}; +pub use self::cargo_pkgid::pkgid; +pub use self::cargo_read_manifest::{read_package, read_packages}; +pub use self::cargo_run::run; +pub use self::cargo_test::{run_benches, run_tests, TestOptions}; +pub use self::cargo_uninstall::uninstall; +pub use self::fix::{fix, fix_maybe_exec_rustc, FixOptions}; +pub use self::lockfile::{load_pkg_lockfile, write_pkg_lockfile}; +pub use self::registry::HttpTimeout; +pub use self::registry::{configure_http_handle, http_handle_and_timeout}; +pub use self::registry::{http_handle, needs_custom_http_transport, registry_login, search}; +pub use self::registry::{modify_owners, yank, OwnersOptions, PublishOpts}; +pub use self::registry::{publish, registry_configuration, RegistryConfig}; +pub use self::resolve::{ + add_overrides, get_resolved_packages, resolve_with_previous, resolve_ws, resolve_ws_precisely, + resolve_ws_with_method, +}; + +mod cargo_clean; +mod cargo_compile; +mod cargo_doc; +mod cargo_fetch; +mod cargo_generate_lockfile; +mod cargo_install; +mod cargo_new; +mod cargo_output_metadata; +mod cargo_package; +mod cargo_pkgid; +mod cargo_read_manifest; +mod cargo_run; +mod cargo_test; +mod cargo_uninstall; +mod common_for_install_and_uninstall; +mod fix; +mod lockfile; +mod registry; +mod resolve; diff --git a/src/cargo/ops/registry.rs b/src/cargo/ops/registry.rs new file mode 100644 index 000000000..3f30eaf40 --- /dev/null +++ b/src/cargo/ops/registry.rs @@ -0,0 +1,773 @@ +use std::collections::{BTreeMap, HashSet}; +use std::fs::{self, File}; +use std::io::{self, BufRead}; +use std::iter::repeat; +use std::str; +use std::time::Duration; +use std::{cmp, env}; + +use crates_io::{NewCrate, NewCrateDependency, Registry}; +use curl::easy::{Easy, InfoType, SslOpt}; +use log::{log, Level}; +use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET}; + +use crate::core::dependency::Kind; +use 
crate::core::manifest::ManifestMetadata;
+use crate::core::source::Source;
+use crate::core::{Package, SourceId, Workspace};
+use crate::ops;
+use crate::sources::{RegistrySource, SourceConfigMap};
+use crate::util::config::{self, Config};
+use crate::util::errors::{CargoResult, CargoResultExt};
+use crate::util::important_paths::find_root_manifest_for_wd;
+use crate::util::ToUrl;
+use crate::util::{paths, validate_package_name};
+use crate::version;
+
+pub struct RegistryConfig {
+    pub index: Option<String>,
+    pub token: Option<String>,
+}
+
+pub struct PublishOpts<'cfg> {
+    pub config: &'cfg Config,
+    pub token: Option<String>,
+    pub index: Option<String>,
+    pub verify: bool,
+    pub allow_dirty: bool,
+    pub jobs: Option<u32>,
+    pub target: Option<String>,
+    pub dry_run: bool,
+    pub registry: Option<String>,
+    pub features: Vec<String>,
+    pub all_features: bool,
+    pub no_default_features: bool,
+}
+
+pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> {
+    let pkg = ws.current()?;
+
+    if let Some(ref allowed_registries) = *pkg.publish() {
+        if !match opts.registry {
+            Some(ref registry) => allowed_registries.contains(registry),
+            None => false,
+        } {
+            failure::bail!(
+                "some crates cannot be published.\n\
+                 `{}` is marked as unpublishable",
+                pkg.name()
+            );
+        }
+    }
+
+    let (mut registry, reg_id) = registry(
+        opts.config,
+        opts.token.clone(),
+        opts.index.clone(),
+        opts.registry.clone(),
+        true,
+    )?;
+    verify_dependencies(pkg, &registry, reg_id)?;
+
+    // Prepare a tarball, with a non-suppressible warning if metadata
+    // is missing, since this is being put online.
+    let tarball = ops::package(
+        ws,
+        &ops::PackageOpts {
+            config: opts.config,
+            verify: opts.verify,
+            list: false,
+            check_metadata: true,
+            allow_dirty: opts.allow_dirty,
+            target: opts.target.clone(),
+            jobs: opts.jobs,
+            features: opts.features.clone(),
+            all_features: opts.all_features,
+            no_default_features: opts.no_default_features,
+        },
+    )?
+    .unwrap();
+
+    // Upload said tarball to the specified destination.
+    opts.config
+        .shell()
+        .status("Uploading", pkg.package_id().to_string())?;
+    transmit(
+        opts.config,
+        pkg,
+        tarball.file(),
+        &mut registry,
+        reg_id,
+        opts.dry_run,
+    )?;
+
+    Ok(())
+}
+
+fn verify_dependencies(
+    pkg: &Package,
+    registry: &Registry,
+    registry_src: SourceId,
+) -> CargoResult<()> {
+    for dep in pkg.dependencies().iter() {
+        if dep.source_id().is_path() {
+            if !dep.specified_req() {
+                failure::bail!(
+                    "all path dependencies must have a version specified \
+                     when publishing.\ndependency `{}` does not specify \
+                     a version",
+                    dep.package_name()
+                )
+            }
+        } else if dep.source_id() != registry_src {
+            if dep.source_id().is_registry() {
+                // Block requests to send to crates.io with alt-registry deps.
+                // This extra hostname check is mostly to assist with testing,
+                // but also prevents someone using `--index` to specify
+                // something that points to crates.io.
+ let is_crates_io = registry + .host() + .to_url() + .map(|u| u.host_str() == Some("crates.io")) + .unwrap_or(false); + if registry_src.is_default_registry() || is_crates_io { + failure::bail!("crates cannot be published to crates.io with dependencies sourced from other\n\ + registries either publish `{}` on crates.io or pull it into this repository\n\ + and specify it with a path and version\n\ + (crate `{}` is pulled from {})", + dep.package_name(), + dep.package_name(), + dep.source_id()); + } + } else { + failure::bail!( + "crates cannot be published with dependencies sourced from \ + a repository\neither publish `{}` as its own crate and \ + specify a version as a dependency or pull it into this \ + repository and specify it with a path and version\n(crate `{}` has \ + repository path `{}`)", + dep.package_name(), + dep.package_name(), + dep.source_id() + ); + } + } + } + Ok(()) +} + +fn transmit( + config: &Config, + pkg: &Package, + tarball: &File, + registry: &mut Registry, + registry_id: SourceId, + dry_run: bool, +) -> CargoResult<()> { + let deps = pkg + .dependencies() + .iter() + .map(|dep| { + // If the dependency is from a different registry, then include the + // registry in the dependency. + let dep_registry_id = match dep.registry_id() { + Some(id) => id, + None => SourceId::crates_io(config)?, + }; + // In the index and Web API, None means "from the same registry" + // whereas in Cargo.toml, it means "from crates.io". + let dep_registry = if dep_registry_id != registry_id { + Some(dep_registry_id.url().to_string()) + } else { + None + }; + + Ok(NewCrateDependency { + optional: dep.is_optional(), + default_features: dep.uses_default_features(), + name: dep.package_name().to_string(), + features: dep.features().iter().map(|s| s.to_string()).collect(), + version_req: dep.version_req().to_string(), + target: dep.platform().map(|s| s.to_string()), + kind: match dep.kind() { + Kind::Normal => "normal", + Kind::Build => "build", + Kind::Development => "dev", + } + .to_string(), + registry: dep_registry, + explicit_name_in_toml: dep.explicit_name_in_toml().map(|s| s.to_string()), + }) + }) + .collect::>>()?; + let manifest = pkg.manifest(); + let ManifestMetadata { + ref authors, + ref description, + ref homepage, + ref documentation, + ref keywords, + ref readme, + ref repository, + ref license, + ref license_file, + ref categories, + ref badges, + ref links, + } = *manifest.metadata(); + let readme_content = match *readme { + Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?), + None => None, + }; + if let Some(ref file) = *license_file { + if fs::metadata(&pkg.root().join(file)).is_err() { + failure::bail!("the license file `{}` does not exist", file) + } + } + + // Do not upload if performing a dry run + if dry_run { + config.shell().warn("aborting upload due to dry run")?; + return Ok(()); + } + + let summary = pkg.summary(); + let string_features = summary + .features() + .iter() + .map(|(feat, values)| { + ( + feat.to_string(), + values.iter().map(|fv| fv.to_string(&summary)).collect(), + ) + }) + .collect::>>(); + + let publish = registry.publish( + &NewCrate { + name: pkg.name().to_string(), + vers: pkg.version().to_string(), + deps, + features: string_features, + authors: authors.clone(), + description: description.clone(), + homepage: homepage.clone(), + documentation: documentation.clone(), + keywords: keywords.clone(), + categories: categories.clone(), + readme: readme_content, + readme_file: readme.clone(), + repository: repository.clone(), + 
license: license.clone(), + license_file: license_file.clone(), + badges: badges.clone(), + links: links.clone(), + }, + tarball, + ); + + match publish { + Ok(warnings) => { + if !warnings.invalid_categories.is_empty() { + let msg = format!( + "\ + the following are not valid category slugs and were \ + ignored: {}. Please see https://crates.io/category_slugs \ + for the list of all category slugs. \ + ", + warnings.invalid_categories.join(", ") + ); + config.shell().warn(&msg)?; + } + + if !warnings.invalid_badges.is_empty() { + let msg = format!( + "\ + the following are not valid badges and were ignored: {}. \ + Either the badge type specified is unknown or a required \ + attribute is missing. Please see \ + http://doc.crates.io/manifest.html#package-metadata \ + for valid badge types and their required attributes.", + warnings.invalid_badges.join(", ") + ); + config.shell().warn(&msg)?; + } + + if !warnings.other.is_empty() { + for msg in warnings.other { + config.shell().warn(&msg)?; + } + } + + Ok(()) + } + Err(e) => Err(e), + } +} + +pub fn registry_configuration( + config: &Config, + registry: Option, +) -> CargoResult { + let (index, token) = match registry { + Some(registry) => { + validate_package_name(®istry, "registry name", "")?; + ( + Some(config.get_registry_index(®istry)?.to_string()), + config + .get_string(&format!("registries.{}.token", registry))? + .map(|p| p.val), + ) + } + None => { + // Checking out for default index and token + ( + config.get_string("registry.index")?.map(|p| p.val), + config.get_string("registry.token")?.map(|p| p.val), + ) + } + }; + + Ok(RegistryConfig { index, token }) +} + +pub fn registry( + config: &Config, + token: Option, + index: Option, + registry: Option, + force_update: bool, +) -> CargoResult<(Registry, SourceId)> { + // Parse all configuration options + let RegistryConfig { + token: token_config, + index: index_config, + } = registry_configuration(config, registry.clone())?; + let token = token.or(token_config); + let sid = get_source_id(config, index_config.or(index), registry)?; + let api_host = { + let mut src = RegistrySource::remote(sid, &HashSet::new(), config); + // Only update the index if the config is not available or `force` is set. + let cfg = src.config(); + let cfg = if force_update || cfg.is_err() { + src.update() + .chain_err(|| format!("failed to update {}", sid))?; + cfg.or_else(|_| src.config())? + } else { + cfg.unwrap() + }; + cfg.and_then(|cfg| cfg.api) + .ok_or_else(|| failure::format_err!("{} does not support API commands", sid))? + }; + let handle = http_handle(config)?; + Ok((Registry::new_handle(api_host, token, handle), sid)) +} + +/// Creates a new HTTP handle with appropriate global configuration for cargo. +pub fn http_handle(config: &Config) -> CargoResult { + let (mut handle, timeout) = http_handle_and_timeout(config)?; + timeout.configure(&mut handle)?; + Ok(handle) +} + +pub fn http_handle_and_timeout(config: &Config) -> CargoResult<(Easy, HttpTimeout)> { + if config.frozen() { + failure::bail!( + "attempting to make an HTTP request, but --frozen was \ + specified" + ) + } + if !config.network_allowed() { + failure::bail!("can't make HTTP request in the offline mode") + } + + // The timeout option for libcurl by default times out the entire transfer, + // but we probably don't want this. Instead we only set timeouts for the + // connect phase as well as a "low speed" timeout so if we don't receive + // many bytes in a large-ish period of time then we time out. 
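+    // For example, with the defaults configured in `HttpTimeout::new` below
+    // (`http.timeout` = 30 seconds, `http.low-speed-limit` = 10 bytes/sec),
+    // a transfer that stays under 10 bytes/sec for 30 seconds is aborted,
+    // while a slow-but-steady download is left alone.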
+ let mut handle = Easy::new(); + let timeout = configure_http_handle(config, &mut handle)?; + Ok((handle, timeout)) +} + +pub fn needs_custom_http_transport(config: &Config) -> CargoResult { + let proxy_exists = http_proxy_exists(config)?; + let timeout = HttpTimeout::new(config)?.is_non_default(); + let cainfo = config.get_path("http.cainfo")?; + let check_revoke = config.get_bool("http.check-revoke")?; + let user_agent = config.get_string("http.user-agent")?; + + Ok(proxy_exists + || timeout + || cainfo.is_some() + || check_revoke.is_some() + || user_agent.is_some()) +} + +/// Configure a libcurl http handle with the defaults options for Cargo +pub fn configure_http_handle(config: &Config, handle: &mut Easy) -> CargoResult { + if let Some(proxy) = http_proxy(config)? { + handle.proxy(&proxy)?; + } + if let Some(cainfo) = config.get_path("http.cainfo")? { + handle.cainfo(&cainfo.val)?; + } + if let Some(check) = config.get_bool("http.check-revoke")? { + handle.ssl_options(SslOpt::new().no_revoke(!check.val))?; + } + if let Some(user_agent) = config.get_string("http.user-agent")? { + handle.useragent(&user_agent.val)?; + } else { + handle.useragent(&version().to_string())?; + } + + if let Some(true) = config.get::>("http.debug")? { + handle.verbose(true)?; + handle.debug_function(|kind, data| { + let (prefix, level) = match kind { + InfoType::Text => ("*", Level::Debug), + InfoType::HeaderIn => ("<", Level::Debug), + InfoType::HeaderOut => (">", Level::Debug), + InfoType::DataIn => ("{", Level::Trace), + InfoType::DataOut => ("}", Level::Trace), + InfoType::SslDataIn | InfoType::SslDataOut => return, + _ => return, + }; + match str::from_utf8(data) { + Ok(s) => { + for line in s.lines() { + log!(level, "http-debug: {} {}", prefix, line); + } + } + Err(_) => { + log!( + level, + "http-debug: {} ({} bytes of data)", + prefix, + data.len() + ); + } + } + })?; + } + + HttpTimeout::new(config) +} + +#[must_use] +pub struct HttpTimeout { + pub dur: Duration, + pub low_speed_limit: u32, +} + +impl HttpTimeout { + pub fn new(config: &Config) -> CargoResult { + let low_speed_limit = config + .get::>("http.low-speed-limit")? + .unwrap_or(10); + let seconds = config + .get::>("http.timeout")? + .or_else(|| env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok())) + .unwrap_or(30); + Ok(HttpTimeout { + dur: Duration::new(seconds, 0), + low_speed_limit, + }) + } + + fn is_non_default(&self) -> bool { + self.dur != Duration::new(30, 0) || self.low_speed_limit != 10 + } + + pub fn configure(&self, handle: &mut Easy) -> CargoResult<()> { + // The timeout option for libcurl by default times out the entire + // transfer, but we probably don't want this. Instead we only set + // timeouts for the connect phase as well as a "low speed" timeout so + // if we don't receive many bytes in a large-ish period of time then we + // time out. + handle.connect_timeout(self.dur)?; + handle.low_speed_time(self.dur)?; + handle.low_speed_limit(self.low_speed_limit)?; + Ok(()) + } +} + +/// Finds an explicit HTTP proxy if one is available. +/// +/// Favor cargo's `http.proxy`, then git's `http.proxy`. Proxies specified +/// via environment variables are picked up by libcurl. +fn http_proxy(config: &Config) -> CargoResult> { + if let Some(s) = config.get_string("http.proxy")? { + return Ok(Some(s.val)); + } + if let Ok(cfg) = git2::Config::open_default() { + if let Ok(s) = cfg.get_str("http.proxy") { + return Ok(Some(s.to_string())); + } + } + Ok(None) +} + +/// Determine if an http proxy exists. 
+/// +/// Checks the following for existence, in order: +/// +/// * cargo's `http.proxy` +/// * git's `http.proxy` +/// * `http_proxy` env var +/// * `HTTP_PROXY` env var +/// * `https_proxy` env var +/// * `HTTPS_PROXY` env var +fn http_proxy_exists(config: &Config) -> CargoResult { + if http_proxy(config)?.is_some() { + Ok(true) + } else { + Ok(["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"] + .iter() + .any(|v| env::var(v).is_ok())) + } +} + +pub fn registry_login( + config: &Config, + token: Option, + reg: Option, +) -> CargoResult<()> { + let (registry, _) = registry(config, token.clone(), None, reg.clone(), false)?; + + let token = match token { + Some(token) => token, + None => { + println!( + "please visit {}/me and paste the API Token below", + registry.host() + ); + let mut line = String::new(); + let input = io::stdin(); + input + .lock() + .read_line(&mut line) + .chain_err(|| "failed to read stdin") + .map_err(failure::Error::from)?; + line.trim().to_string() + } + }; + + let RegistryConfig { + token: old_token, .. + } = registry_configuration(config, reg.clone())?; + + if let Some(old_token) = old_token { + if old_token == token { + config.shell().status("Login", "already logged in")?; + return Ok(()); + } + } + + config::save_credentials(config, token, reg.clone())?; + config.shell().status( + "Login", + format!( + "token for `{}` saved", + reg.as_ref().map_or("crates.io", String::as_str) + ), + )?; + Ok(()) +} + +pub struct OwnersOptions { + pub krate: Option, + pub token: Option, + pub index: Option, + pub to_add: Option>, + pub to_remove: Option>, + pub list: bool, + pub registry: Option, +} + +pub fn modify_owners(config: &Config, opts: &OwnersOptions) -> CargoResult<()> { + let name = match opts.krate { + Some(ref name) => name.clone(), + None => { + let manifest_path = find_root_manifest_for_wd(config.cwd())?; + let ws = Workspace::new(&manifest_path, config)?; + ws.current()?.package_id().name().to_string() + } + }; + + let (mut registry, _) = registry( + config, + opts.token.clone(), + opts.index.clone(), + opts.registry.clone(), + true, + )?; + + if let Some(ref v) = opts.to_add { + let v = v.iter().map(|s| &s[..]).collect::>(); + let msg = registry.add_owners(&name, &v).map_err(|e| { + failure::format_err!("failed to invite owners to crate {}: {}", name, e) + })?; + + config.shell().status("Owner", msg)?; + } + + if let Some(ref v) = opts.to_remove { + let v = v.iter().map(|s| &s[..]).collect::>(); + config + .shell() + .status("Owner", format!("removing {:?} from crate {}", v, name))?; + registry + .remove_owners(&name, &v) + .chain_err(|| format!("failed to remove owners from crate {}", name))?; + } + + if opts.list { + let owners = registry + .list_owners(&name) + .chain_err(|| format!("failed to list owners of crate {}", name))?; + for owner in owners.iter() { + print!("{}", owner.login); + match (owner.name.as_ref(), owner.email.as_ref()) { + (Some(name), Some(email)) => println!(" ({} <{}>)", name, email), + (Some(s), None) | (None, Some(s)) => println!(" ({})", s), + (None, None) => println!(), + } + } + } + + Ok(()) +} + +pub fn yank( + config: &Config, + krate: Option, + version: Option, + token: Option, + index: Option, + undo: bool, + reg: Option, +) -> CargoResult<()> { + let name = match krate { + Some(name) => name, + None => { + let manifest_path = find_root_manifest_for_wd(config.cwd())?; + let ws = Workspace::new(&manifest_path, config)?; + ws.current()?.package_id().name().to_string() + } + }; + let version = match version { + Some(v) 
=> v, + None => failure::bail!("a version must be specified to yank"), + }; + + let (mut registry, _) = registry(config, token, index, reg, true)?; + + if undo { + config + .shell() + .status("Unyank", format!("{}:{}", name, version))?; + registry + .unyank(&name, &version) + .chain_err(|| "failed to undo a yank")?; + } else { + config + .shell() + .status("Yank", format!("{}:{}", name, version))?; + registry + .yank(&name, &version) + .chain_err(|| "failed to yank")?; + } + + Ok(()) +} + +fn get_source_id( + config: &Config, + index: Option, + reg: Option, +) -> CargoResult { + match (reg, index) { + (Some(r), _) => SourceId::alt_registry(config, &r), + (_, Some(i)) => SourceId::for_registry(&i.to_url()?), + _ => { + let map = SourceConfigMap::new(config)?; + let src = map.load(SourceId::crates_io(config)?, &HashSet::new())?; + Ok(src.replaced_source_id()) + } + } +} + +pub fn search( + query: &str, + config: &Config, + index: Option, + limit: u32, + reg: Option, +) -> CargoResult<()> { + fn truncate_with_ellipsis(s: &str, max_width: usize) -> String { + // We should truncate at grapheme-boundary and compute character-widths, + // yet the dependencies on unicode-segmentation and unicode-width are + // not worth it. + let mut chars = s.chars(); + let mut prefix = (&mut chars).take(max_width - 1).collect::(); + if chars.next().is_some() { + prefix.push('…'); + } + prefix + } + + let (mut registry, source_id) = registry(config, None, index, reg, false)?; + let (crates, total_crates) = registry + .search(query, limit) + .chain_err(|| "failed to retrieve search results from the registry")?; + + let names = crates + .iter() + .map(|krate| format!("{} = \"{}\"", krate.name, krate.max_version)) + .collect::>(); + + let description_margin = names.iter().map(|s| s.len() + 4).max().unwrap_or_default(); + + let description_length = cmp::max(80, 128 - description_margin); + + let descriptions = crates.iter().map(|krate| { + krate + .description + .as_ref() + .map(|desc| truncate_with_ellipsis(&desc.replace("\n", " "), description_length)) + }); + + for (name, description) in names.into_iter().zip(descriptions) { + let line = match description { + Some(desc) => { + let space = repeat(' ') + .take(description_margin - name.len()) + .collect::(); + name + &space + "# " + &desc + } + None => name, + }; + println!("{}", line); + } + + let search_max_limit = 100; + if total_crates > limit && limit < search_max_limit { + println!( + "... and {} crates more (use --limit N to see more)", + total_crates - limit + ); + } else if total_crates > limit && limit >= search_max_limit { + let extra = if source_id.is_default_registry() { + format!( + " (go to http://crates.io/search?q={} to see more)", + percent_encode(query.as_bytes(), QUERY_ENCODE_SET) + ) + } else { + String::new() + }; + println!("... 
and {} crates more{}", total_crates - limit, extra); + } + + Ok(()) +} diff --git a/src/cargo/ops/resolve.rs b/src/cargo/ops/resolve.rs new file mode 100644 index 000000000..5d87bc4d2 --- /dev/null +++ b/src/cargo/ops/resolve.rs @@ -0,0 +1,593 @@ +use std::collections::HashSet; + +use log::{debug, trace}; + +use crate::core::registry::PackageRegistry; +use crate::core::resolver::{self, Method, Resolve}; +use crate::core::{PackageId, PackageIdSpec, PackageSet, Source, SourceId, Workspace}; +use crate::ops; +use crate::sources::PathSource; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::profile; + +const UNUSED_PATCH_WARNING: &str = "\ +Check that the patched package version and available features are compatible +with the dependency requirements. If the patch has a different version from +what is locked in the Cargo.lock file, run `cargo update` to use the new +version. This may also occur with an optional dependency that is not enabled."; + +/// Resolves all dependencies for the workspace using the previous +/// lock file as a guide if present. +/// +/// This function will also write the result of resolution as a new +/// lock file. +pub fn resolve_ws<'a>(ws: &Workspace<'a>) -> CargoResult<(PackageSet<'a>, Resolve)> { + let mut registry = PackageRegistry::new(ws.config())?; + let resolve = resolve_with_registry(ws, &mut registry, true)?; + let packages = get_resolved_packages(&resolve, registry)?; + Ok((packages, resolve)) +} + +/// Resolves dependencies for some packages of the workspace, +/// taking into account `paths` overrides and activated features. +pub fn resolve_ws_precisely<'a>( + ws: &Workspace<'a>, + source: Option>, + features: &[String], + all_features: bool, + no_default_features: bool, + specs: &[PackageIdSpec], +) -> CargoResult<(PackageSet<'a>, Resolve)> { + let features = Method::split_features(features); + let method = if all_features { + Method::Everything + } else { + Method::Required { + dev_deps: true, + features: &features, + all_features: false, + uses_default_features: !no_default_features, + } + }; + resolve_ws_with_method(ws, source, method, specs) +} + +pub fn resolve_ws_with_method<'a>( + ws: &Workspace<'a>, + source: Option>, + method: Method<'_>, + specs: &[PackageIdSpec], +) -> CargoResult<(PackageSet<'a>, Resolve)> { + let mut registry = PackageRegistry::new(ws.config())?; + if let Some(source) = source { + registry.add_preloaded(source); + } + let mut add_patches = true; + + let resolve = if ws.require_optional_deps() { + // First, resolve the root_package's *listed* dependencies, as well as + // downloading and updating all remotes and such. + let resolve = resolve_with_registry(ws, &mut registry, false)?; + add_patches = false; + + // Second, resolve with precisely what we're doing. Filter out + // transitive dependencies if necessary, specify features, handle + // overrides, etc. + let _p = profile::start("resolving with overrides..."); + + add_overrides(&mut registry, ws)?; + + for &(ref replace_spec, ref dep) in ws.root_replace() { + if !resolve + .iter() + .any(|r| replace_spec.matches(r) && !dep.matches_id(r)) + { + ws.config() + .shell() + .warn(format!("package replacement is not used: {}", replace_spec))? + } + } + + Some(resolve) + } else { + ops::load_pkg_lockfile(ws)? 
+    };
+
+    let resolved_with_overrides = ops::resolve_with_previous(
+        &mut registry,
+        ws,
+        method,
+        resolve.as_ref(),
+        None,
+        specs,
+        add_patches,
+        true,
+    )?;
+
+    let packages = get_resolved_packages(&resolved_with_overrides, registry)?;
+
+    Ok((packages, resolved_with_overrides))
+}
+
+fn resolve_with_registry<'cfg>(
+    ws: &Workspace<'cfg>,
+    registry: &mut PackageRegistry<'cfg>,
+    warn: bool,
+) -> CargoResult<Resolve> {
+    let prev = ops::load_pkg_lockfile(ws)?;
+    let resolve = resolve_with_previous(
+        registry,
+        ws,
+        Method::Everything,
+        prev.as_ref(),
+        None,
+        &[],
+        true,
+        warn,
+    )?;
+
+    if !ws.is_ephemeral() {
+        ops::write_pkg_lockfile(ws, &resolve)?;
+    }
+    Ok(resolve)
+}
+
+/// Resolves all dependencies for a package using an optional previous instance
+/// of resolve to guide the resolution process.
+///
+/// This also takes an optional hash set, `to_avoid`, which is a list of package
+/// IDs that should be avoided when consulting the previous instance of resolve
+/// (often used in pairings with updates).
+///
+/// The previous resolve normally comes from a lock file. This function does not
+/// read or write lock files from the filesystem.
+pub fn resolve_with_previous<'cfg>(
+    registry: &mut PackageRegistry<'cfg>,
+    ws: &Workspace<'cfg>,
+    method: Method<'_>,
+    previous: Option<&Resolve>,
+    to_avoid: Option<&HashSet<PackageId>>,
+    specs: &[PackageIdSpec],
+    register_patches: bool,
+    warn: bool,
+) -> CargoResult<Resolve> {
+    // Here we place an artificial limitation that all non-registry sources
+    // cannot be locked at more than one revision. This means that if a Git
+    // repository provides more than one package, they must all be updated in
+    // step when any of them are updated.
+    //
+    // TODO: this seems like a hokey reason to single out the registry as being
+    // different.
+    let mut to_avoid_sources: HashSet<SourceId> = HashSet::new();
+    if let Some(to_avoid) = to_avoid {
+        to_avoid_sources.extend(
+            to_avoid
+                .iter()
+                .map(|p| p.source_id())
+                .filter(|s| !s.is_registry()),
+        );
+    }
+
+    let keep = |p: &PackageId| {
+        !to_avoid_sources.contains(&p.source_id())
+            && match to_avoid {
+                Some(set) => !set.contains(p),
+                None => true,
+            }
+    };
+
+    // In the case where a previous instance of resolve is available, we
+    // want to lock as many packages as possible to the previous version
+    // without disturbing the graph structure.
+    let mut try_to_use = HashSet::new();
+    if let Some(r) = previous {
+        trace!("previous: {:?}", r);
+        register_previous_locks(ws, registry, r, &keep);
+
+        // Everything in the previous lock file we want to keep is prioritized
+        // in dependency selection if it comes up, aka we want to have
+        // conservative updates.
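+        //
+        // A hypothetical example: if Cargo.lock pins `log 0.4.5` and a plain
+        // `cargo build` runs (so nothing is in `to_avoid`), `keep` returns
+        // `true` for `log` and `log 0.4.5` lands in `try_to_use`, keeping it
+        // preferred even if `0.4.6` exists. After `cargo update -p log`,
+        // `keep(log)` is `false` and the resolver is free to pick a newer
+        // version.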
+ try_to_use.extend(r.iter().filter(keep).inspect(|id| { + debug!("attempting to prefer {}", id); + })); + } + + if register_patches { + for (url, patches) in ws.root_patch() { + let previous = match previous { + Some(r) => r, + None => { + registry.patch(url, patches)?; + continue; + } + }; + let patches = patches + .iter() + .map(|dep| { + let unused = previous.unused_patches().iter().cloned(); + let candidates = previous.iter().chain(unused); + match candidates.filter(keep).find(|&id| dep.matches_id(id)) { + Some(id) => { + let mut dep = dep.clone(); + dep.lock_to(id); + dep + } + None => dep.clone(), + } + }) + .collect::>(); + registry.patch(url, &patches)?; + } + + registry.lock_patches(); + } + + for member in ws.members() { + registry.add_sources(Some(member.package_id().source_id()))?; + } + + let mut summaries = Vec::new(); + if ws.config().cli_unstable().package_features { + let mut members = Vec::new(); + match method { + Method::Everything => members.extend(ws.members()), + Method::Required { + features, + all_features, + uses_default_features, + .. + } => { + if specs.len() > 1 && !features.is_empty() { + failure::bail!("cannot specify features for more than one package"); + } + members.extend( + ws.members() + .filter(|m| specs.iter().any(|spec| spec.matches(m.package_id()))), + ); + // Edge case: running `cargo build -p foo`, where `foo` is not a member + // of current workspace. Add all packages from workspace to get `foo` + // into the resolution graph. + if members.is_empty() { + if !(features.is_empty() && !all_features && uses_default_features) { + failure::bail!("cannot specify features for packages outside of workspace"); + } + members.extend(ws.members()); + } + } + } + for member in members { + let summary = registry.lock(member.summary().clone()); + summaries.push((summary, method)) + } + } else { + for member in ws.members() { + let method_to_resolve = match method { + // When everything for a workspace we want to be sure to resolve all + // members in the workspace, so propagate the `Method::Everything`. + Method::Everything => Method::Everything, + + // If we're not resolving everything though then we're constructing the + // exact crate graph we're going to build. Here we don't necessarily + // want to keep around all workspace crates as they may not all be + // built/tested. + // + // Additionally, the `method` specified represents command line + // flags, which really only matters for the current package + // (determined by the cwd). If other packages are specified (via + // `-p`) then the command line flags like features don't apply to + // them. + // + // As a result, if this `member` is the current member of the + // workspace, then we use `method` specified. Otherwise we use a + // base method with no features specified but using default features + // for any other packages specified with `-p`. + Method::Required { + dev_deps, + all_features, + .. 
+ } => { + let base = Method::Required { + dev_deps, + features: &[], + all_features, + uses_default_features: true, + }; + let member_id = member.package_id(); + match ws.current_opt() { + Some(current) if member_id == current.package_id() => method, + _ => { + if specs.iter().any(|spec| spec.matches(member_id)) { + base + } else { + continue; + } + } + } + } + }; + + let summary = registry.lock(member.summary().clone()); + summaries.push((summary, method_to_resolve)); + } + }; + + let root_replace = ws.root_replace(); + + let replace = match previous { + Some(r) => root_replace + .iter() + .map(|&(ref spec, ref dep)| { + for (&key, &val) in r.replacements().iter() { + if spec.matches(key) && dep.matches_id(val) && keep(&val) { + let mut dep = dep.clone(); + dep.lock_to(val); + return (spec.clone(), dep); + } + } + (spec.clone(), dep.clone()) + }) + .collect::>(), + None => root_replace.to_vec(), + }; + + ws.preload(registry); + let mut resolved = resolver::resolve( + &summaries, + &replace, + registry, + &try_to_use, + Some(ws.config()), + warn, + )?; + resolved.register_used_patches(registry.patches()); + if register_patches { + // It would be good if this warning was more targeted and helpful + // (such as showing close candidates that failed to match). However, + // that's not terribly easy to do, so just show a general help + // message. + let warnings: Vec = resolved + .unused_patches() + .iter() + .map(|pkgid| format!("Patch `{}` was not used in the crate graph.", pkgid)) + .collect(); + if !warnings.is_empty() { + ws.config().shell().warn(format!( + "{}\n{}", + warnings.join("\n"), + UNUSED_PATCH_WARNING + ))?; + } + } + if let Some(previous) = previous { + resolved.merge_from(previous)?; + } + Ok(resolved) +} + +/// Read the `paths` configuration variable to discover all path overrides that +/// have been configured. +pub fn add_overrides<'a>( + registry: &mut PackageRegistry<'a>, + ws: &Workspace<'a>, +) -> CargoResult<()> { + let paths = match ws.config().get_list("paths")? { + Some(list) => list, + None => return Ok(()), + }; + + let paths = paths.val.iter().map(|&(ref s, ref p)| { + // The path listed next to the string is the config file in which the + // key was located, so we want to pop off the `.cargo/config` component + // to get the directory containing the `.cargo` folder. + (p.parent().unwrap().parent().unwrap().join(s), p) + }); + + for (path, definition) in paths { + let id = SourceId::for_path(&path)?; + let mut source = PathSource::new_recursive(&path, id, ws.config()); + source.update().chain_err(|| { + format!( + "failed to update path override `{}` \ + (defined in `{}`)", + path.display(), + definition.display() + ) + })?; + registry.add_override(Box::new(source)); + } + Ok(()) +} + +pub fn get_resolved_packages<'a>( + resolve: &Resolve, + registry: PackageRegistry<'a>, +) -> CargoResult> { + let ids: Vec = resolve.iter().collect(); + registry.get(&ids) +} + +/// In this function we're responsible for informing the `registry` of all +/// locked dependencies from the previous lock file we had, `resolve`. +/// +/// This gets particularly tricky for a couple of reasons. The first is that we +/// want all updates to be conservative, so we actually want to take the +/// `resolve` into account (and avoid unnecessary registry updates and such). +/// the second, however, is that we want to be resilient to updates of +/// manifests. 
For example if a dependency is added or a version is changed we +/// want to make sure that we properly re-resolve (conservatively) instead of +/// providing an opaque error. +/// +/// The logic here is somewhat subtle, but there should be more comments below to +/// clarify things. +/// +/// Note that this function, at the time of this writing, is basically the +/// entire fix for issue #4127. +fn register_previous_locks( + ws: &Workspace<'_>, + registry: &mut PackageRegistry<'_>, + resolve: &Resolve, + keep: &dyn Fn(&PackageId) -> bool, +) { + let path_pkg = |id: SourceId| { + if !id.is_path() { + return None; + } + if let Ok(path) = id.url().to_file_path() { + if let Ok(pkg) = ws.load(&path.join("Cargo.toml")) { + return Some(pkg); + } + } + None + }; + + // Ok so we've been passed in a `keep` function which basically says "if I + // return `true` then this package wasn't listed for an update on the command + // line". That is, if we run `cargo update -p foo` then `keep(bar)` will return + // `true`, whereas `keep(foo)` will return `false` (roughly speaking). + // + // This isn't actually quite what we want, however. Instead we want to + // further refine this `keep` function with *all transitive dependencies* of + // the packages we're not keeping. For example, consider a case like this: + // + // * There's a crate `log`. + // * There's a crate `serde` which depends on `log`. + // + // Let's say we then run `cargo update -p serde`. This may *also* want to + // update the `log` dependency as our newer version of `serde` may have a + // new minimum version required for `log`. Now this isn't always guaranteed + // to work. What'll happen here is we *won't* lock the `log` dependency nor + // the `log` crate itself, but we will inform the registry "please prefer + // this version of `log`". That way if our newer version of serde works with + // the older version of `log`, we conservatively won't update `log`. If, + // however, nothing else in the dependency graph depends on `log` and the + // newer version of `serde` requires a new version of `log` it'll get pulled + // in (as we didn't accidentally lock it to an old version). + // + // Additionally, here we process all path dependencies listed in the previous + // resolve. They can not only have their dependencies change but also + // the versions of the package change as well. If this ends up happening + // then we want to make sure we don't lock a package ID node that doesn't + // actually exist. Note that we don't do transitive visits of all the + // package's dependencies here as that'll be covered below to poison those + // if they changed. + let mut avoid_locking = HashSet::new(); + registry.add_to_yanked_whitelist(resolve.iter().filter(keep)); + for node in resolve.iter() { + if !keep(&node) { + add_deps(resolve, node, &mut avoid_locking); + } else if let Some(pkg) = path_pkg(node.source_id()) { + if pkg.package_id() != node { + avoid_locking.insert(node); + } + } + } + + // Ok, but the above loop isn't the entire story! Updates to the dependency + // graph can come from two locations, the `cargo update` command or + // manifests themselves. For example a manifest on the filesystem may + // have been updated to have an updated version requirement on `serde`. In + // this case both `keep(serde)` and `keep(log)` return `true` (the `keep` + // that's an argument to this function). We, however, don't want to keep + // either of those! Otherwise we'll get obscure resolve errors about locked + // versions. 
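+    //
+    // (A hypothetical illustration: bumping a path crate's manifest from
+    // `serde = "0.9"` to `serde = "1.0"` leaves `keep(serde)` returning
+    // `true`, even though the locked 0.9 copy can no longer satisfy the
+    // graph.)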
+ // + // To solve this problem we iterate over all packages with path sources + // (aka ones with manifests that are changing) and take a look at all of + // their dependencies. If any dependency does not match something in the + // previous lock file, then we're guaranteed that the main resolver will + // update the source of this dependency no matter what. Knowing this we + // poison all packages from the same source, forcing them all to get + // updated. + // + // This may seem like a heavy hammer, and it is! It means that if you change + // anything from crates.io then all of crates.io becomes unlocked. Note, + // however, that we still want conservative updates. This currently happens + // because the first candidate the resolver picks is the previously locked + // version, and only if that fails to activate to we move on and try + // a different version. (giving the guise of conservative updates) + // + // For example let's say we had `serde = "0.1"` written in our lock file. + // When we later edit this to `serde = "0.1.3"` we don't want to lock serde + // at its old version, 0.1.1. Instead we want to allow it to update to + // `0.1.3` and update its own dependencies (like above). To do this *all + // crates from crates.io* are not locked (aka added to `avoid_locking`). + // For dependencies like `log` their previous version in the lock file will + // come up first before newer version, if newer version are available. + let mut path_deps = ws.members().cloned().collect::>(); + let mut visited = HashSet::new(); + while let Some(member) = path_deps.pop() { + if !visited.insert(member.package_id()) { + continue; + } + for dep in member.dependencies() { + // If this dependency didn't match anything special then we may want + // to poison the source as it may have been added. If this path + // dependencies is **not** a workspace member, however, and it's an + // optional/non-transitive dependency then it won't be necessarily + // be in our lock file. If this shows up then we avoid poisoning + // this source as otherwise we'd repeatedly update the registry. + // + // TODO: this breaks adding an optional dependency in a + // non-workspace member and then simultaneously editing the + // dependency on that crate to enable the feature. For now, + // this bug is better than the always-updating registry though. + if !ws + .members() + .any(|pkg| pkg.package_id() == member.package_id()) + && (dep.is_optional() || !dep.is_transitive()) + { + continue; + } + + // If this is a path dependency, then try to push it onto our + // worklist. + if let Some(pkg) = path_pkg(dep.source_id()) { + path_deps.push(pkg); + continue; + } + + // If we match *anything* in the dependency graph then we consider + // ourselves all ok, and assume that we'll resolve to that. + if resolve.iter().any(|id| dep.matches_ignoring_source(id)) { + continue; + } + + // Ok if nothing matches, then we poison the source of these + // dependencies and the previous lock file. + debug!( + "poisoning {} because {} looks like it changed {}", + dep.source_id(), + member.package_id(), + dep.package_name() + ); + for id in resolve + .iter() + .filter(|id| id.source_id() == dep.source_id()) + { + add_deps(resolve, id, &mut avoid_locking); + } + } + } + + // Alright now that we've got our new, fresh, shiny, and refined `keep` + // function let's put it to action. Take a look at the previous lock file, + // filter everything by this callback, and then shove everything else into + // the registry as a locked dependency. 
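+    //
+    // For example (hypothetically): after `cargo update -p serde` where a path
+    // manifest also changed, `avoid_locking` holds `serde`, its transitive
+    // dependencies, and every package poisoned above, so none of their locked
+    // entries are re-registered below and all of them may float to newer
+    // compatible versions.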
+ let keep = |id: &PackageId| keep(id) && !avoid_locking.contains(id); + + for node in resolve.iter().filter(keep) { + let deps = resolve.deps_not_replaced(node).filter(keep).collect(); + registry.register_lock(node, deps); + } + + /// Recursively add `node` and all its transitive dependencies to `set`. + fn add_deps(resolve: &Resolve, node: PackageId, set: &mut HashSet) { + if !set.insert(node) { + return; + } + debug!("ignoring any lock pointing directly at {}", node); + for dep in resolve.deps_not_replaced(node) { + add_deps(resolve, dep, set); + } + } +} diff --git a/src/cargo/sources/config.rs b/src/cargo/sources/config.rs new file mode 100644 index 000000000..e1975fc62 --- /dev/null +++ b/src/cargo/sources/config.rs @@ -0,0 +1,256 @@ +//! Implementation of configuration for various sources +//! +//! This module will parse the various `source.*` TOML configuration keys into a +//! structure usable by Cargo itself. Currently this is primarily used to map +//! sources to one another via the `replace-with` key in `.cargo/config`. + +use std::collections::{HashMap, HashSet}; +use std::path::{Path, PathBuf}; + +use log::debug; +use url::Url; + +use crate::core::{GitReference, PackageId, Source, SourceId}; +use crate::sources::{ReplacedSource, CRATES_IO_REGISTRY}; +use crate::util::config::ConfigValue; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{Config, ToUrl}; + +#[derive(Clone)] +pub struct SourceConfigMap<'cfg> { + cfgs: HashMap, + id2name: HashMap, + config: &'cfg Config, +} + +/// Configuration for a particular source, found in TOML looking like: +/// +/// ```toml +/// [source.crates-io] +/// registry = 'https://github.com/rust-lang/crates.io-index' +/// replace-with = 'foo' # optional +/// ``` +#[derive(Clone)] +struct SourceConfig { + // id this source corresponds to, inferred from the various defined keys in + // the configuration + id: SourceId, + + // Name of the source that this source should be replaced with. This field + // is a tuple of (name, path) where path is where this configuration key was + // defined (the literal `.cargo/config` file). + replace_with: Option<(String, PathBuf)>, +} + +impl<'cfg> SourceConfigMap<'cfg> { + pub fn new(config: &'cfg Config) -> CargoResult> { + let mut base = SourceConfigMap::empty(config)?; + if let Some(table) = config.get_table("source")? 
{ + for (key, value) in table.val.iter() { + base.add_config(key, value)?; + } + } + Ok(base) + } + + pub fn empty(config: &'cfg Config) -> CargoResult> { + let mut base = SourceConfigMap { + cfgs: HashMap::new(), + id2name: HashMap::new(), + config, + }; + base.add( + CRATES_IO_REGISTRY, + SourceConfig { + id: SourceId::crates_io(config)?, + replace_with: None, + }, + ); + Ok(base) + } + + pub fn config(&self) -> &'cfg Config { + self.config + } + + pub fn load( + &self, + id: SourceId, + yanked_whitelist: &HashSet, + ) -> CargoResult> { + debug!("loading: {}", id); + let mut name = match self.id2name.get(&id) { + Some(name) => name, + None => return Ok(id.load(self.config, yanked_whitelist)?), + }; + let mut path = Path::new("/"); + let orig_name = name; + let new_id; + loop { + let cfg = match self.cfgs.get(name) { + Some(cfg) => cfg, + None => failure::bail!( + "could not find a configured source with the \ + name `{}` when attempting to lookup `{}` \ + (configuration in `{}`)", + name, + orig_name, + path.display() + ), + }; + match cfg.replace_with { + Some((ref s, ref p)) => { + name = s; + path = p; + } + None if id == cfg.id => return Ok(id.load(self.config, yanked_whitelist)?), + None => { + new_id = cfg.id.with_precise(id.precise().map(|s| s.to_string())); + break; + } + } + debug!("following pointer to {}", name); + if name == orig_name { + failure::bail!( + "detected a cycle of `replace-with` sources, the source \ + `{}` is eventually replaced with itself \ + (configuration in `{}`)", + name, + path.display() + ) + } + } + + let new_src = new_id.load( + self.config, + &yanked_whitelist + .iter() + .map(|p| p.map_source(id, new_id)) + .collect(), + )?; + let old_src = id.load(self.config, yanked_whitelist)?; + if !new_src.supports_checksums() && old_src.supports_checksums() { + failure::bail!( + "\ +cannot replace `{orig}` with `{name}`, the source `{orig}` supports \ +checksums, but `{name}` does not + +a lock file compatible with `{orig}` cannot be generated in this situation +", + orig = orig_name, + name = name + ); + } + + if old_src.requires_precise() && id.precise().is_none() { + failure::bail!( + "\ +the source {orig} requires a lock file to be present first before it can be +used against vendored source code + +remove the source replacement configuration, generate a lock file, and then +restore the source replacement configuration to continue the build +", + orig = orig_name + ); + } + + Ok(Box::new(ReplacedSource::new(id, new_id, new_src))) + } + + fn add(&mut self, name: &str, cfg: SourceConfig) { + self.id2name.insert(cfg.id, name.to_string()); + self.cfgs.insert(name.to_string(), cfg); + } + + fn add_config(&mut self, name: &str, cfg: &ConfigValue) -> CargoResult<()> { + let (table, _path) = cfg.table(&format!("source.{}", name))?; + let mut srcs = Vec::new(); + if let Some(val) = table.get("registry") { + let url = url(val, &format!("source.{}.registry", name))?; + srcs.push(SourceId::for_registry(&url)?); + } + if let Some(val) = table.get("local-registry") { + let (s, path) = val.string(&format!("source.{}.local-registry", name))?; + let mut path = path.to_path_buf(); + path.pop(); + path.pop(); + path.push(s); + srcs.push(SourceId::for_local_registry(&path)?); + } + if let Some(val) = table.get("directory") { + let (s, path) = val.string(&format!("source.{}.directory", name))?; + let mut path = path.to_path_buf(); + path.pop(); + path.pop(); + path.push(s); + srcs.push(SourceId::for_directory(&path)?); + } + if let Some(val) = table.get("git") { + let url = 
url(val, &format!("source.{}.git", name))?; + let r#try = |s: &str| { + let val = match table.get(s) { + Some(s) => s, + None => return Ok(None), + }; + let key = format!("source.{}.{}", name, s); + val.string(&key).map(Some) + }; + let reference = match r#try("branch")? { + Some(b) => GitReference::Branch(b.0.to_string()), + None => match r#try("tag")? { + Some(b) => GitReference::Tag(b.0.to_string()), + None => match r#try("rev")? { + Some(b) => GitReference::Rev(b.0.to_string()), + None => GitReference::Branch("master".to_string()), + }, + }, + }; + srcs.push(SourceId::for_git(&url, reference)?); + } + if name == "crates-io" && srcs.is_empty() { + srcs.push(SourceId::crates_io(self.config)?); + } + + let mut srcs = srcs.into_iter(); + let src = srcs.next().ok_or_else(|| { + failure::format_err!( + "no source URL specified for `source.{}`, need \ + either `registry` or `local-registry` defined", + name + ) + })?; + if srcs.next().is_some() { + failure::bail!("more than one source URL specified for `source.{}`", name) + } + + let mut replace_with = None; + if let Some(val) = table.get("replace-with") { + let (s, path) = val.string(&format!("source.{}.replace-with", name))?; + replace_with = Some((s.to_string(), path.to_path_buf())); + } + + self.add( + name, + SourceConfig { + id: src, + replace_with, + }, + ); + + return Ok(()); + + fn url(cfg: &ConfigValue, key: &str) -> CargoResult { + let (url, path) = cfg.string(key)?; + let url = url.to_url().chain_err(|| { + format!( + "configuration key `{}` specified an invalid \ + URL (in {})", + key, + path.display() + ) + })?; + Ok(url) + } + } +} diff --git a/src/cargo/sources/directory.rs b/src/cargo/sources/directory.rs new file mode 100644 index 000000000..8009a149d --- /dev/null +++ b/src/cargo/sources/directory.rs @@ -0,0 +1,219 @@ +use std::collections::HashMap; +use std::fmt::{self, Debug, Formatter}; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; + +use serde::Deserialize; + +use crate::core::source::MaybePackage; +use crate::core::{Dependency, Package, PackageId, Source, SourceId, Summary}; +use crate::sources::PathSource; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::paths; +use crate::util::{Config, Sha256}; + +pub struct DirectorySource<'cfg> { + source_id: SourceId, + root: PathBuf, + packages: HashMap, + config: &'cfg Config, +} + +#[derive(Deserialize)] +struct Checksum { + package: Option, + files: HashMap, +} + +impl<'cfg> DirectorySource<'cfg> { + pub fn new(path: &Path, id: SourceId, config: &'cfg Config) -> DirectorySource<'cfg> { + DirectorySource { + source_id: id, + root: path.to_path_buf(), + config, + packages: HashMap::new(), + } + } +} + +impl<'cfg> Debug for DirectorySource<'cfg> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "DirectorySource {{ root: {:?} }}", self.root) + } +} + +impl<'cfg> Source for DirectorySource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + let packages = self.packages.values().map(|p| &p.0); + let matches = packages.filter(|pkg| dep.matches(pkg.summary())); + for summary in matches.map(|pkg| pkg.summary().clone()) { + f(summary); + } + Ok(()) + } + + fn fuzzy_query(&mut self, _dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + let packages = self.packages.values().map(|p| &p.0); + for summary in packages.map(|pkg| pkg.summary().clone()) { + f(summary); + } + Ok(()) + } + + fn supports_checksums(&self) -> bool { + true + } + + fn 
requires_precise(&self) -> bool { + true + } + + fn source_id(&self) -> SourceId { + self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + self.packages.clear(); + let entries = self.root.read_dir().chain_err(|| { + format!( + "failed to read root of directory source: {}", + self.root.display() + ) + })?; + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + // Ignore hidden/dot directories as they typically don't contain + // crates and otherwise may conflict with a VCS + // (rust-lang/cargo#3414). + if let Some(s) = path.file_name().and_then(|s| s.to_str()) { + if s.starts_with('.') { + continue; + } + } + + // Vendor directories are often checked into a VCS, but throughout + // the lifetime of a vendor dir crates are often added and deleted. + // Some VCS implementations don't always fully delete the directory + // when a dir is removed from a different checkout. Sometimes a + // mostly-empty dir is left behind. + // + // Additionally vendor directories are sometimes accompanied with + // readme files and other auxiliary information not too interesting + // to Cargo. + // + // To help handle all this we only try processing folders with a + // `Cargo.toml` in them. This has the upside of being pretty + // flexible with the contents of vendor directories but has the + // downside of accidentally misconfigured vendor directories + // silently returning less crates. + if !path.join("Cargo.toml").exists() { + continue; + } + + let mut src = PathSource::new(&path, self.source_id, self.config); + src.update()?; + let pkg = src.root_package()?; + + let cksum_file = path.join(".cargo-checksum.json"); + let cksum = paths::read(&path.join(cksum_file)).chain_err(|| { + format!( + "failed to load checksum `.cargo-checksum.json` \ + of {} v{}", + pkg.package_id().name(), + pkg.package_id().version() + ) + })?; + let cksum: Checksum = serde_json::from_str(&cksum).chain_err(|| { + format!( + "failed to decode `.cargo-checksum.json` of \ + {} v{}", + pkg.package_id().name(), + pkg.package_id().version() + ) + })?; + + let mut manifest = pkg.manifest().clone(); + let mut summary = manifest.summary().clone(); + if let Some(ref package) = cksum.package { + summary = summary.set_checksum(package.clone()); + } + manifest.set_summary(summary); + let pkg = Package::new(manifest, pkg.manifest_path()); + self.packages.insert(pkg.package_id(), (pkg, cksum)); + } + + Ok(()) + } + + fn download(&mut self, id: PackageId) -> CargoResult { + self.packages + .get(&id) + .map(|p| &p.0) + .cloned() + .map(MaybePackage::Ready) + .ok_or_else(|| failure::format_err!("failed to find package with id: {}", id)) + } + + fn finish_download(&mut self, _id: PackageId, _data: Vec) -> CargoResult { + panic!("no downloads to do") + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + Ok(pkg.package_id().version().to_string()) + } + + fn verify(&self, id: PackageId) -> CargoResult<()> { + let (pkg, cksum) = match self.packages.get(&id) { + Some(&(ref pkg, ref cksum)) => (pkg, cksum), + None => failure::bail!("failed to find entry for `{}` in directory source", id), + }; + + let mut buf = [0; 16 * 1024]; + for (file, cksum) in cksum.files.iter() { + let mut h = Sha256::new(); + let file = pkg.root().join(file); + + (|| -> CargoResult<()> { + let mut f = File::open(&file)?; + loop { + match f.read(&mut buf)? 
{ + 0 => return Ok(()), + n => h.update(&buf[..n]), + } + } + })() + .chain_err(|| format!("failed to calculate checksum of: {}", file.display()))?; + + let actual = hex::encode(h.finish()); + if &*actual != cksum { + failure::bail!( + "\ + the listed checksum of `{}` has changed:\n\ + expected: {}\n\ + actual: {}\n\ + \n\ + directory sources are not intended to be edited, if \ + modifications are required then it is recommended \ + that [replace] is used with a forked copy of the \ + source\ + ", + file.display(), + cksum, + actual + ); + } + } + + Ok(()) + } + + fn describe(&self) -> String { + format!("directory source `{}`", self.root.display()) + } + + fn add_to_yanked_whitelist(&mut self, _pkgs: &[PackageId]) {} +} diff --git a/src/cargo/sources/git/mod.rs b/src/cargo/sources/git/mod.rs new file mode 100644 index 000000000..86d0094d1 --- /dev/null +++ b/src/cargo/sources/git/mod.rs @@ -0,0 +1,4 @@ +pub use self::source::{canonicalize_url, GitSource}; +pub use self::utils::{fetch, GitCheckout, GitDatabase, GitRemote, GitRevision}; +mod source; +mod utils; diff --git a/src/cargo/sources/git/source.rs b/src/cargo/sources/git/source.rs new file mode 100644 index 000000000..ce1400c36 --- /dev/null +++ b/src/cargo/sources/git/source.rs @@ -0,0 +1,299 @@ +use std::fmt::{self, Debug, Formatter}; + +use log::trace; +use url::Url; + +use crate::core::source::{MaybePackage, Source, SourceId}; +use crate::core::GitReference; +use crate::core::{Dependency, Package, PackageId, Summary}; +use crate::sources::git::utils::{GitRemote, GitRevision}; +use crate::sources::PathSource; +use crate::util::errors::CargoResult; +use crate::util::hex::short_hash; +use crate::util::Config; + +pub struct GitSource<'cfg> { + remote: GitRemote, + reference: GitReference, + source_id: SourceId, + path_source: Option>, + rev: Option, + ident: String, + config: &'cfg Config, +} + +impl<'cfg> GitSource<'cfg> { + pub fn new(source_id: SourceId, config: &'cfg Config) -> CargoResult> { + assert!(source_id.is_git(), "id is not git, id={}", source_id); + + let remote = GitRemote::new(source_id.url()); + let ident = ident(source_id.url())?; + + let reference = match source_id.precise() { + Some(s) => GitReference::Rev(s.to_string()), + None => source_id.git_reference().unwrap().clone(), + }; + + let source = GitSource { + remote, + reference, + source_id, + path_source: None, + rev: None, + ident, + config, + }; + + Ok(source) + } + + pub fn url(&self) -> &Url { + self.remote.url() + } + + pub fn read_packages(&mut self) -> CargoResult> { + if self.path_source.is_none() { + self.update()?; + } + self.path_source.as_mut().unwrap().read_packages() + } +} + +fn ident(url: &Url) -> CargoResult { + let url = canonicalize_url(url)?; + let ident = url + .path_segments() + .and_then(|mut s| s.next_back()) + .unwrap_or(""); + + let ident = if ident == "" { "_empty" } else { ident }; + + Ok(format!("{}-{}", ident, short_hash(&url))) +} + +// Some hacks and heuristics for making equivalent URLs hash the same. +pub fn canonicalize_url(url: &Url) -> CargoResult { + let mut url = url.clone(); + + // cannot-be-a-base-urls (e.g., `github.com:rust-lang-nursery/rustfmt.git`) + // are not supported. + if url.cannot_be_a_base() { + failure::bail!( + "invalid url `{}`: cannot-be-a-base-URLs are not supported", + url + ) + } + + // Strip a trailing slash. + if url.path().ends_with('/') { + url.path_segments_mut().unwrap().pop_if_empty(); + } + + // HACK: for GitHub URLs specifically, just lower-case + // everything. 
GitHub treats both the same, but they hash + // differently, and we're gonna be hashing them. This wants a more + // general solution, and also we're almost certainly not using the + // same case conversion rules that GitHub does. (See issue #84.) + if url.host_str() == Some("github.com") { + url.set_scheme("https").unwrap(); + let path = url.path().to_lowercase(); + url.set_path(&path); + } + + // Repos can generally be accessed with or without `.git` extension. + let needs_chopping = url.path().ends_with(".git"); + if needs_chopping { + let last = { + let last = url.path_segments().unwrap().next_back().unwrap(); + last[..last.len() - 4].to_owned() + }; + url.path_segments_mut().unwrap().pop().push(&last); + } + + Ok(url) +} + +impl<'cfg> Debug for GitSource<'cfg> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "git repo at {}", self.remote.url())?; + + match self.reference.pretty_ref() { + Some(s) => write!(f, " ({})", s), + None => Ok(()), + } + } +} + +impl<'cfg> Source for GitSource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + let src = self + .path_source + .as_mut() + .expect("BUG: `update()` must be called before `query()`"); + src.query(dep, f) + } + + fn fuzzy_query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + let src = self + .path_source + .as_mut() + .expect("BUG: `update()` must be called before `query()`"); + src.fuzzy_query(dep, f) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + true + } + + fn source_id(&self) -> SourceId { + self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + let lock = + self.config + .git_path() + .open_rw(".cargo-lock-git", self.config, "the git checkouts")?; + + let db_path = lock.parent().join("db").join(&self.ident); + + if self.config.cli_unstable().offline && !db_path.exists() { + failure::bail!( + "can't checkout from '{}': you are in the offline mode (-Z offline)", + self.remote.url() + ); + } + + // Resolve our reference to an actual revision, and check if the + // database already has that revision. If it does, we just load a + // database pinned at that revision, and if we don't we issue an update + // to try to find the revision. + let actual_rev = self.remote.rev_for(&db_path, &self.reference); + let should_update = actual_rev.is_err() || self.source_id.precise().is_none(); + + let (db, actual_rev) = if should_update && !self.config.cli_unstable().offline { + self.config.shell().status( + "Updating", + format!("git repository `{}`", self.remote.url()), + )?; + + trace!("updating git source `{:?}`", self.remote); + + self.remote + .checkout(&db_path, &self.reference, self.config)? + } else { + (self.remote.db_at(&db_path)?, actual_rev.unwrap()) + }; + + // Don’t use the full hash, in order to contribute less to reaching the path length limit + // on Windows. See . + let short_id = db.to_short_id(&actual_rev).unwrap(); + + let checkout_path = lock + .parent() + .join("checkouts") + .join(&self.ident) + .join(short_id.as_str()); + + // Copy the database to the checkout location. After this we could drop + // the lock on the database as we no longer needed it, but we leave it + // in scope so the destructors here won't tamper with too much. + // Checkout is immutable, so we don't need to protect it with a lock once + // it is created. 
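+        //
+        // Layout sketch (paths illustrative), relative to the directory
+        // holding the `.cargo-lock-git` lock acquired above:
+        //
+        //     db/<ident>/                    # bare clone ("database")
+        //     checkouts/<ident>/<short-id>/  # working-tree checkout made below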
+ db.copy_to(actual_rev.clone(), &checkout_path, self.config)?; + + let source_id = self.source_id.with_precise(Some(actual_rev.to_string())); + let path_source = PathSource::new_recursive(&checkout_path, source_id, self.config); + + self.path_source = Some(path_source); + self.rev = Some(actual_rev); + self.path_source.as_mut().unwrap().update() + } + + fn download(&mut self, id: PackageId) -> CargoResult { + trace!( + "getting packages for package ID `{}` from `{:?}`", + id, + self.remote + ); + self.path_source + .as_mut() + .expect("BUG: `update()` must be called before `get()`") + .download(id) + } + + fn finish_download(&mut self, _id: PackageId, _data: Vec) -> CargoResult { + panic!("no download should have started") + } + + fn fingerprint(&self, _pkg: &Package) -> CargoResult { + Ok(self.rev.as_ref().unwrap().to_string()) + } + + fn describe(&self) -> String { + format!("Git repository {}", self.source_id) + } + + fn add_to_yanked_whitelist(&mut self, _pkgs: &[PackageId]) {} +} + +#[cfg(test)] +mod test { + use super::ident; + use crate::util::ToUrl; + use url::Url; + + #[test] + pub fn test_url_to_path_ident_with_path() { + let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap(); + assert!(ident.starts_with("cargo-")); + } + + #[test] + pub fn test_url_to_path_ident_without_path() { + let ident = ident(&url("https://github.com")).unwrap(); + assert!(ident.starts_with("_empty-")); + } + + #[test] + fn test_canonicalize_idents_by_stripping_trailing_url_slash() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap(); + let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_idents_by_lowercasing_github_urls() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_idents_by_stripping_dot_git() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_idents_different_protocols() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_cannot_be_a_base_urls() { + assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err()); + assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err()); + } + + fn url(s: &str) -> Url { + s.to_url().unwrap() + } +} diff --git a/src/cargo/sources/git/utils.rs b/src/cargo/sources/git/utils.rs new file mode 100644 index 000000000..662361094 --- /dev/null +++ b/src/cargo/sources/git/utils.rs @@ -0,0 +1,923 @@ +use std::env; +use std::fmt; +use std::fs::{self, File}; +use std::mem; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use curl::easy::{Easy, List}; +use git2::{self, ObjectType}; +use log::{debug, info}; +use serde::ser; +use serde::Serialize; +use url::Url; + +use crate::core::GitReference; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::paths; +use crate::util::process_builder::process; +use crate::util::{internal, network, Config, Progress, ToUrl}; + +#[derive(PartialEq, Clone, Debug)] +pub struct GitRevision(git2::Oid); + 
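+// A `GitRevision` serializes as its hex object ID via the `Display`-based
+// helper below, e.g. the string "b54aa50bd1b6..." (hash illustrative).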
+impl ser::Serialize for GitRevision {
+    fn serialize<S: ser::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        serialize_str(self, s)
+    }
+}
+
+fn serialize_str<T, S>(t: &T, s: S) -> Result<S::Ok, S::Error>
+where
+    T: fmt::Display,
+    S: ser::Serializer,
+{
+    s.collect_str(t)
+}
+
+impl fmt::Display for GitRevision {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.0, f)
+    }
+}
+
+pub struct GitShortID(git2::Buf);
+
+impl GitShortID {
+    pub fn as_str(&self) -> &str {
+        self.0.as_str().unwrap()
+    }
+}
+
+/// `GitRemote` represents a remote repository. It gets cloned into a local
+/// `GitDatabase`.
+#[derive(PartialEq, Clone, Debug, Serialize)]
+pub struct GitRemote {
+    #[serde(serialize_with = "serialize_str")]
+    url: Url,
+}
+
+/// `GitDatabase` is a local clone of a remote repository's database. Multiple
+/// `GitCheckouts` can be cloned from this `GitDatabase`.
+#[derive(Serialize)]
+pub struct GitDatabase {
+    remote: GitRemote,
+    path: PathBuf,
+    #[serde(skip_serializing)]
+    repo: git2::Repository,
+}
+
+/// `GitCheckout` is a local checkout of a particular revision. Calling
+/// `clone_into` with a reference will resolve the reference into a revision,
+/// and return a `failure::Error` if no revision for that reference was found.
+#[derive(Serialize)]
+pub struct GitCheckout<'a> {
+    database: &'a GitDatabase,
+    location: PathBuf,
+    revision: GitRevision,
+    #[serde(skip_serializing)]
+    repo: git2::Repository,
+}
+
+// Implementations
+
+impl GitRemote {
+    pub fn new(url: &Url) -> GitRemote {
+        GitRemote { url: url.clone() }
+    }
+
+    pub fn url(&self) -> &Url {
+        &self.url
+    }
+
+    pub fn rev_for(&self, path: &Path, reference: &GitReference) -> CargoResult<GitRevision> {
+        reference.resolve(&self.db_at(path)?.repo)
+    }
+
+    pub fn checkout(
+        &self,
+        into: &Path,
+        reference: &GitReference,
+        cargo_config: &Config,
+    ) -> CargoResult<(GitDatabase, GitRevision)> {
+        let mut repo_and_rev = None;
+        if let Ok(mut repo) = git2::Repository::open(into) {
+            self.fetch_into(&mut repo, cargo_config)
+                .chain_err(|| format!("failed to fetch into {}", into.display()))?;
+            if let Ok(rev) = reference.resolve(&repo) {
+                repo_and_rev = Some((repo, rev));
+            }
+        }
+        let (repo, rev) = match repo_and_rev {
+            Some(pair) => pair,
+            None => {
+                let repo = self
+                    .clone_into(into, cargo_config)
+                    .chain_err(|| format!("failed to clone into: {}", into.display()))?;
+                let rev = reference.resolve(&repo)?;
+                (repo, rev)
+            }
+        };
+
+        Ok((
+            GitDatabase {
+                remote: self.clone(),
+                path: into.to_path_buf(),
+                repo,
+            },
+            rev,
+        ))
+    }
+
+    pub fn db_at(&self, db_path: &Path) -> CargoResult<GitDatabase> {
+        let repo = git2::Repository::open(db_path)?;
+        Ok(GitDatabase {
+            remote: self.clone(),
+            path: db_path.to_path_buf(),
+            repo,
+        })
+    }
+
+    fn fetch_into(&self, dst: &mut git2::Repository, cargo_config: &Config) -> CargoResult<()> {
+        // Create a local anonymous remote in the repository to fetch the url
+        let refspec = "refs/heads/*:refs/heads/*";
+        fetch(dst, &self.url, refspec, cargo_config)
+    }
+
+    fn clone_into(&self, dst: &Path, cargo_config: &Config) -> CargoResult<git2::Repository> {
+        if fs::metadata(&dst).is_ok() {
+            paths::remove_dir_all(dst)?;
+        }
+        fs::create_dir_all(dst)?;
+        let mut repo = init(dst, true)?;
+        fetch(
+            &mut repo,
+            &self.url,
+            "refs/heads/*:refs/heads/*",
+            cargo_config,
+        )?;
+        Ok(repo)
+    }
+}
+
+impl GitDatabase {
+    pub fn copy_to(
+        &self,
+        rev: GitRevision,
+        dest: &Path,
+        cargo_config: &Config,
+    ) -> CargoResult<GitCheckout<'_>> {
+        let mut checkout = None;
+        if let Ok(repo) = git2::Repository::open(dest) {
+            let mut co = GitCheckout::new(dest, self,
rev.clone(), repo); + if !co.is_fresh() { + // After a successful fetch operation do a sanity check to + // ensure we've got the object in our database to reset to. This + // can fail sometimes for corrupt repositories where the fetch + // operation succeeds but the object isn't actually there. + co.fetch(cargo_config)?; + if co.has_object() { + co.reset(cargo_config)?; + assert!(co.is_fresh()); + checkout = Some(co); + } + } else { + checkout = Some(co); + } + }; + let checkout = match checkout { + Some(c) => c, + None => GitCheckout::clone_into(dest, self, rev, cargo_config)?, + }; + checkout.update_submodules(cargo_config)?; + Ok(checkout) + } + + pub fn to_short_id(&self, revision: &GitRevision) -> CargoResult { + let obj = self.repo.find_object(revision.0, None)?; + Ok(GitShortID(obj.short_id()?)) + } + + pub fn has_ref(&self, reference: &str) -> CargoResult<()> { + self.repo.revparse_single(reference)?; + Ok(()) + } +} + +impl GitReference { + fn resolve(&self, repo: &git2::Repository) -> CargoResult { + let id = match *self { + GitReference::Tag(ref s) => (|| -> CargoResult { + let refname = format!("refs/tags/{}", s); + let id = repo.refname_to_id(&refname)?; + let obj = repo.find_object(id, None)?; + let obj = obj.peel(ObjectType::Commit)?; + Ok(obj.id()) + })() + .chain_err(|| format!("failed to find tag `{}`", s))?, + GitReference::Branch(ref s) => { + let b = repo + .find_branch(s, git2::BranchType::Local) + .chain_err(|| format!("failed to find branch `{}`", s))?; + b.get() + .target() + .ok_or_else(|| failure::format_err!("branch `{}` did not have a target", s))? + } + GitReference::Rev(ref s) => { + let obj = repo.revparse_single(s)?; + match obj.as_tag() { + Some(tag) => tag.target_id(), + None => obj.id(), + } + } + }; + Ok(GitRevision(id)) + } +} + +impl<'a> GitCheckout<'a> { + fn new( + path: &Path, + database: &'a GitDatabase, + revision: GitRevision, + repo: git2::Repository, + ) -> GitCheckout<'a> { + GitCheckout { + location: path.to_path_buf(), + database, + revision, + repo, + } + } + + fn clone_into( + into: &Path, + database: &'a GitDatabase, + revision: GitRevision, + config: &Config, + ) -> CargoResult> { + let dirname = into.parent().unwrap(); + fs::create_dir_all(&dirname) + .chain_err(|| format!("Couldn't mkdir {}", dirname.display()))?; + if into.exists() { + paths::remove_dir_all(into)?; + } + + // we're doing a local filesystem-to-filesystem clone so there should + // be no need to respect global configuration options, so pass in + // an empty instance of `git2::Config` below. + let git_config = git2::Config::new()?; + + // Clone the repository, but make sure we use the "local" option in + // libgit2 which will attempt to use hardlinks to set up the database. + // This should speed up the clone operation quite a bit if it works. + // + // Note that we still use the same fetch options because while we don't + // need authentication information we may want progress bars and such. + let url = database.path.to_url()?; + let mut repo = None; + with_fetch_options(&git_config, &url, config, &mut |fopts| { + let mut checkout = git2::build::CheckoutBuilder::new(); + checkout.dry_run(); // we'll do this below during a `reset` + + let r = git2::build::RepoBuilder::new() + // use hard links and/or copy the database, we're doing a + // filesystem clone so this'll speed things up quite a bit. 
+ .clone_local(git2::build::CloneLocal::Local) + .with_checkout(checkout) + .fetch_options(fopts) + // .remote_create(|repo, _name, url| repo.remote_anonymous(url)) + .clone(url.as_str(), into)?; + repo = Some(r); + Ok(()) + })?; + let repo = repo.unwrap(); + + let checkout = GitCheckout::new(into, database, revision, repo); + checkout.reset(config)?; + Ok(checkout) + } + + fn is_fresh(&self) -> bool { + match self.repo.revparse_single("HEAD") { + Ok(ref head) if head.id() == self.revision.0 => { + // See comments in reset() for why we check this + self.location.join(".cargo-ok").exists() + } + _ => false, + } + } + + fn fetch(&mut self, cargo_config: &Config) -> CargoResult<()> { + info!("fetch {}", self.repo.path().display()); + let url = self.database.path.to_url()?; + let refspec = "refs/heads/*:refs/heads/*"; + fetch(&mut self.repo, &url, refspec, cargo_config)?; + Ok(()) + } + + fn has_object(&self) -> bool { + self.repo.find_object(self.revision.0, None).is_ok() + } + + fn reset(&self, config: &Config) -> CargoResult<()> { + // If we're interrupted while performing this reset (e.g., we die because + // of a signal) Cargo needs to be sure to try to check out this repo + // again on the next go-round. + // + // To enable this we have a dummy file in our checkout, .cargo-ok, which + // if present means that the repo has been successfully reset and is + // ready to go. Hence if we start to do a reset, we make sure this file + // *doesn't* exist, and then once we're done we create the file. + let ok_file = self.location.join(".cargo-ok"); + let _ = paths::remove_file(&ok_file); + info!("reset {} to {}", self.repo.path().display(), self.revision); + let object = self.repo.find_object(self.revision.0, None)?; + reset(&self.repo, &object, config)?; + File::create(ok_file)?; + Ok(()) + } + + fn update_submodules(&self, cargo_config: &Config) -> CargoResult<()> { + return update_submodules(&self.repo, cargo_config); + + fn update_submodules(repo: &git2::Repository, cargo_config: &Config) -> CargoResult<()> { + info!("update submodules for: {:?}", repo.workdir().unwrap()); + + for mut child in repo.submodules()? { + update_submodule(repo, &mut child, cargo_config).chain_err(|| { + format!( + "failed to update submodule `{}`", + child.name().unwrap_or("") + ) + })?; + } + Ok(()) + } + + fn update_submodule( + parent: &git2::Repository, + child: &mut git2::Submodule<'_>, + cargo_config: &Config, + ) -> CargoResult<()> { + child.init(false)?; + let url = child + .url() + .ok_or_else(|| internal("non-utf8 url for submodule"))?; + + // A submodule which is listed in .gitmodules but not actually + // checked out will not have a head id, so we should ignore it. + let head = match child.head_id() { + Some(head) => head, + None => return Ok(()), + }; + + // If the submodule hasn't been checked out yet, we need to + // clone it. If it has been checked out and the head is the same + // as the submodule's head, then we can skip an update and keep + // recursing. + let head_and_repo = child.open().and_then(|repo| { + let target = repo.head()?.target(); + Ok((target, repo)) + }); + let mut repo = match head_and_repo { + Ok((head, repo)) => { + if child.head_id() == head { + return update_submodules(&repo, cargo_config); + } + repo + } + Err(..) => { + let path = parent.workdir().unwrap().join(child.path()); + let _ = paths::remove_dir_all(&path); + init(&path, false)? 
+ } + }; + + // Fetch data from origin and reset to the head commit + let refspec = "refs/heads/*:refs/heads/*"; + let url = url.to_url()?; + fetch(&mut repo, &url, refspec, cargo_config).chain_err(|| { + internal(format!( + "failed to fetch submodule `{}` from {}", + child.name().unwrap_or(""), + url + )) + })?; + + let obj = repo.find_object(head, None)?; + reset(&repo, &obj, cargo_config)?; + update_submodules(&repo, cargo_config) + } + } +} + +/// Prepare the authentication callbacks for cloning a git repository. +/// +/// The main purpose of this function is to construct the "authentication +/// callback" which is used to clone a repository. This callback will attempt to +/// find the right authentication on the system (without user input) and will +/// guide libgit2 in doing so. +/// +/// The callback is provided `allowed` types of credentials, and we try to do as +/// much as possible based on that: +/// +/// * Prioritize SSH keys from the local ssh agent as they're likely the most +/// reliable. The username here is prioritized from the credential +/// callback, then from whatever is configured in git itself, and finally +/// we fall back to the generic user of `git`. +/// +/// * If a username/password is allowed, then we fallback to git2-rs's +/// implementation of the credential helper. This is what is configured +/// with `credential.helper` in git, and is the interface for the macOS +/// keychain, for example. +/// +/// * After the above two have failed, we just kinda grapple attempting to +/// return *something*. +/// +/// If any form of authentication fails, libgit2 will repeatedly ask us for +/// credentials until we give it a reason to not do so. To ensure we don't +/// just sit here looping forever we keep track of authentications we've +/// attempted and we don't try the same ones again. +fn with_authentication(url: &str, cfg: &git2::Config, mut f: F) -> CargoResult +where + F: FnMut(&mut git2::Credentials<'_>) -> CargoResult, +{ + let mut cred_helper = git2::CredentialHelper::new(url); + cred_helper.config(cfg); + + let mut ssh_username_requested = false; + let mut cred_helper_bad = None; + let mut ssh_agent_attempts = Vec::new(); + let mut any_attempts = false; + let mut tried_sshkey = false; + + let mut res = f(&mut |url, username, allowed| { + any_attempts = true; + // libgit2's "USERNAME" authentication actually means that it's just + // asking us for a username to keep going. This is currently only really + // used for SSH authentication and isn't really an authentication type. + // The logic currently looks like: + // + // let user = ...; + // if (user.is_null()) + // user = callback(USERNAME, null, ...); + // + // callback(SSH_KEY, user, ...) + // + // So if we're being called here then we know that (a) we're using ssh + // authentication and (b) no username was specified in the URL that + // we're trying to clone. We need to guess an appropriate username here, + // but that may involve a few attempts. Unfortunately we can't switch + // usernames during one authentication session with libgit2, so to + // handle this we bail out of this authentication session after setting + // the flag `ssh_username_requested`, and then we handle this below. + if allowed.contains(git2::CredentialType::USERNAME) { + debug_assert!(username.is_none()); + ssh_username_requested = true; + return Err(git2::Error::from_str("gonna try usernames later")); + } + + // An "SSH_KEY" authentication indicates that we need some sort of SSH + // authentication. 
This can currently either come from the ssh-agent + // process or from a raw in-memory SSH key. Cargo only supports using + // ssh-agent currently. + // + // If we get called with this then the only way that should be possible + // is if a username is specified in the URL itself (e.g., `username` is + // Some), hence the unwrap() here. We try custom usernames down below. + if allowed.contains(git2::CredentialType::SSH_KEY) && !tried_sshkey { + // If ssh-agent authentication fails, libgit2 will keep + // calling this callback asking for other authentication + // methods to try. Make sure we only try ssh-agent once, + // to avoid looping forever. + tried_sshkey = true; + let username = username.unwrap(); + debug_assert!(!ssh_username_requested); + ssh_agent_attempts.push(username.to_string()); + return git2::Cred::ssh_key_from_agent(username); + } + + // Sometimes libgit2 will ask for a username/password in plaintext. This + // is where Cargo would have an interactive prompt if we supported it, + // but we currently don't! Right now the only way we support fetching a + // plaintext password is through the `credential.helper` support, so + // fetch that here. + // + // If ssh-agent authentication fails, libgit2 will keep calling this + // callback asking for other authentication methods to try. Check + // cred_helper_bad to make sure we only try the git credentail helper + // once, to avoid looping forever. + if allowed.contains(git2::CredentialType::USER_PASS_PLAINTEXT) && cred_helper_bad.is_none() { + let r = git2::Cred::credential_helper(cfg, url, username); + cred_helper_bad = Some(r.is_err()); + return r; + } + + // I'm... not sure what the DEFAULT kind of authentication is, but seems + // easy to support? + if allowed.contains(git2::CredentialType::DEFAULT) { + return git2::Cred::default(); + } + + // Whelp, we tried our best + Err(git2::Error::from_str("no authentication available")) + }); + + // Ok, so if it looks like we're going to be doing ssh authentication, we + // want to try a few different usernames as one wasn't specified in the URL + // for us to use. In order, we'll try: + // + // * A credential helper's username for this URL, if available. + // * This account's username. + // * "git" + // + // We have to restart the authentication session each time (due to + // constraints in libssh2 I guess? maybe this is inherent to ssh?), so we + // call our callback, `f`, in a loop here. + if ssh_username_requested { + debug_assert!(res.is_err()); + let mut attempts = Vec::new(); + attempts.push("git".to_string()); + if let Ok(s) = env::var("USER").or_else(|_| env::var("USERNAME")) { + attempts.push(s); + } + if let Some(ref s) = cred_helper.username { + attempts.push(s.clone()); + } + + while let Some(s) = attempts.pop() { + // We should get `USERNAME` first, where we just return our attempt, + // and then after that we should get `SSH_KEY`. If the first attempt + // fails we'll get called again, but we don't have another option so + // we bail out. + let mut attempts = 0; + res = f(&mut |_url, username, allowed| { + if allowed.contains(git2::CredentialType::USERNAME) { + return git2::Cred::username(&s); + } + if allowed.contains(git2::CredentialType::SSH_KEY) { + debug_assert_eq!(Some(&s[..]), username); + attempts += 1; + if attempts == 1 { + ssh_agent_attempts.push(s.to_string()); + return git2::Cred::ssh_key_from_agent(&s); + } + } + Err(git2::Error::from_str("no authentication available")) + }); + + // If we made two attempts then that means: + // + // 1. 
A username was requested, we returned `s`. + // 2. An ssh key was requested, we returned to look up `s` in the + // ssh agent. + // 3. For whatever reason that lookup failed, so we were asked again + // for another mode of authentication. + // + // Essentially, if `attempts == 2` then in theory the only error was + // that this username failed to authenticate (e.g., no other network + // errors happened). Otherwise something else is funny so we bail + // out. + if attempts != 2 { + break; + } + } + } + + if res.is_ok() || !any_attempts { + return res.map_err(From::from); + } + + // In the case of an authentication failure (where we tried something) then + // we try to give a more helpful error message about precisely what we + // tried. + let res = res.map_err(failure::Error::from).chain_err(|| { + let mut msg = "failed to authenticate when downloading \ + repository" + .to_string(); + if !ssh_agent_attempts.is_empty() { + let names = ssh_agent_attempts + .iter() + .map(|s| format!("`{}`", s)) + .collect::>() + .join(", "); + msg.push_str(&format!( + "\nattempted ssh-agent authentication, but \ + none of the usernames {} succeeded", + names + )); + } + if let Some(failed_cred_helper) = cred_helper_bad { + if failed_cred_helper { + msg.push_str( + "\nattempted to find username/password via \ + git's `credential.helper` support, but failed", + ); + } else { + msg.push_str( + "\nattempted to find username/password via \ + `credential.helper`, but maybe the found \ + credentials were incorrect", + ); + } + } + msg + })?; + Ok(res) +} + +fn reset(repo: &git2::Repository, obj: &git2::Object<'_>, config: &Config) -> CargoResult<()> { + let mut pb = Progress::new("Checkout", config); + let mut opts = git2::build::CheckoutBuilder::new(); + opts.progress(|_, cur, max| { + drop(pb.tick(cur, max)); + }); + repo.reset(obj, git2::ResetType::Hard, Some(&mut opts))?; + Ok(()) +} + +pub fn with_fetch_options( + git_config: &git2::Config, + url: &Url, + config: &Config, + cb: &mut dyn FnMut(git2::FetchOptions<'_>) -> CargoResult<()>, +) -> CargoResult<()> { + let mut progress = Progress::new("Fetch", config); + network::with_retry(config, || { + with_authentication(url.as_str(), git_config, |f| { + let mut rcb = git2::RemoteCallbacks::new(); + rcb.credentials(f); + + rcb.transfer_progress(|stats| { + progress + .tick(stats.indexed_objects(), stats.total_objects()) + .is_ok() + }); + + // Create a local anonymous remote in the repository to fetch the + // url + let mut opts = git2::FetchOptions::new(); + opts.remote_callbacks(rcb) + .download_tags(git2::AutotagOption::All); + cb(opts) + })?; + Ok(()) + }) +} + +pub fn fetch( + repo: &mut git2::Repository, + url: &Url, + refspec: &str, + config: &Config, +) -> CargoResult<()> { + if config.frozen() { + failure::bail!( + "attempting to update a git repository, but --frozen \ + was specified" + ) + } + if !config.network_allowed() { + failure::bail!("can't update a git repository in the offline mode") + } + + // If we're fetching from GitHub, attempt GitHub's special fast path for + // testing if we've already got an up-to-date copy of the repository + if url.host_str() == Some("github.com") { + if let Ok(oid) = repo.refname_to_id("refs/remotes/origin/master") { + let mut handle = config.http()?.borrow_mut(); + debug!("attempting GitHub fast path for {}", url); + if github_up_to_date(&mut handle, url, &oid) { + return Ok(()); + } else { + debug!("fast path failed, falling back to a git fetch"); + } + } + } + + // We reuse repositories quite a lot, so before we 
go through and update the
+    // repo check to see if it's a little too old and could benefit from a gc.
+    // In theory this shouldn't be too expensive compared to the network
+    // request we're about to issue.
+    maybe_gc_repo(repo)?;
+
+    // Unfortunately `libgit2` is notably lacking in the realm of authentication
+    // when compared to the `git` command line. As a result, allow an escape
+    // hatch for users that would prefer to use `git`-the-CLI for fetching
+    // repositories instead of `libgit2`-the-library. This should make more
+    // flavors of authentication possible while also still giving us all the
+    // speed and portability of using `libgit2`.
+    if let Some(val) = config.get_bool("net.git-fetch-with-cli")? {
+        if val.val {
+            return fetch_with_cli(repo, url, refspec, config);
+        }
+    }
+
+    debug!("doing a fetch for {}", url);
+    let git_config = git2::Config::open_default()?;
+    with_fetch_options(&git_config, url, config, &mut |mut opts| {
+        // The `fetch` operation here may fail spuriously due to a corrupt
+        // repository. It could also fail, however, for a whole slew of other
+        // reasons (aka network related reasons). We want Cargo to automatically
+        // recover from corrupt repositories, but we don't want Cargo to stomp
+        // over other legitimate errors.
+        //
+        // Consequently we save off the error of the `fetch` operation and if it
+        // looks like a "corrupt repo" error then we blow away the repo and try
+        // again. If it looks like any other kind of error, or if we've already
+        // blown away the repository, then we want to return the error as-is.
+        let mut repo_reinitialized = false;
+        loop {
+            debug!("initiating fetch of {} from {}", refspec, url);
+            let res = repo
+                .remote_anonymous(url.as_str())?
+                .fetch(&[refspec], Some(&mut opts), None);
+            let err = match res {
+                Ok(()) => break,
+                Err(e) => e,
+            };
+            debug!("fetch failed: {}", err);
+
+            if !repo_reinitialized && err.class() == git2::ErrorClass::Reference {
+                repo_reinitialized = true;
+                debug!(
+                    "looks like this is a corrupt repository, reinitializing \
+                     and trying again"
+                );
+                if reinitialize(repo).is_ok() {
+                    continue;
+                }
+            }
+
+            return Err(err.into());
+        }
+        Ok(())
+    })
+}
+
+fn fetch_with_cli(
+    repo: &mut git2::Repository,
+    url: &Url,
+    refspec: &str,
+    config: &Config,
+) -> CargoResult<()> {
+    let mut cmd = process("git");
+    cmd.arg("fetch")
+        .arg("--tags") // fetch all tags
+        .arg("--quiet")
+        .arg("--update-head-ok") // see discussion in #2078
+        .arg(url.to_string())
+        .arg(refspec)
+        .cwd(repo.path());
+    config
+        .shell()
+        .verbose(|s| s.status("Running", &cmd.to_string()))?;
+    cmd.exec()?;
+    Ok(())
+}
+
+/// Cargo has a bunch of long-lived git repositories in its global cache and
+/// some, like the index, are updated very frequently. Right now each update
+/// creates a new "pack file" inside the git database, and over time this can
+/// cause bad performance and bad current behavior in libgit2.
+///
+/// One pathological use case today is where libgit2 opens hundreds of file
+/// descriptors, getting us dangerously close to blowing out the OS limits of
+/// how many fds we can have open. This is detailed in #4403.
+///
+/// To try to combat this problem we attempt a `git gc` here. Note, though, that
+/// we may not even have `git` installed on the system! As a result we
+/// opportunistically try a `git gc` when the pack directory looks too big, and
+/// failing that we just blow away the repository and start over.
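+///
+/// As a sketch of how the heuristic below is tuned (the default threshold is
+/// 100 pack files), the limit can be overridden through the
+/// `__CARGO_PACKFILE_LIMIT` environment variable read in this function,
+/// e.g. when running any command that fetches a git repository:
+///
+/// ```sh
+/// __CARGO_PACKFILE_LIMIT=10 cargo fetch
+/// ```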
+fn maybe_gc_repo(repo: &mut git2::Repository) -> CargoResult<()> {
+    // Here we arbitrarily declare that if you have more than 100 files in your
+    // `pack` folder then we need to do a gc.
+    let entries = match repo.path().join("objects/pack").read_dir() {
+        Ok(e) => e.count(),
+        Err(_) => {
+            debug!("skipping gc as pack dir appears gone");
+            return Ok(());
+        }
+    };
+    let max = env::var("__CARGO_PACKFILE_LIMIT")
+        .ok()
+        .and_then(|s| s.parse::<usize>().ok())
+        .unwrap_or(100);
+    if entries < max {
+        debug!("skipping gc as there's only {} pack files", entries);
+        return Ok(());
+    }
+
+    // First up, try a literal `git gc` by shelling out to git. This is pretty
+    // likely to fail though as we may not have `git` installed. Note that
+    // libgit2 doesn't currently implement the gc operation, so there's no
+    // equivalent there.
+    match Command::new("git")
+        .arg("gc")
+        .current_dir(repo.path())
+        .output()
+    {
+        Ok(out) => {
+            debug!(
+                "git-gc status: {}\n\nstdout ---\n{}\nstderr ---\n{}",
+                out.status,
+                String::from_utf8_lossy(&out.stdout),
+                String::from_utf8_lossy(&out.stderr)
+            );
+            if out.status.success() {
+                let new = git2::Repository::open(repo.path())?;
+                mem::replace(repo, new);
+                return Ok(());
+            }
+        }
+        Err(e) => debug!("git-gc failed to spawn: {}", e),
+    }
+
+    // Alright, all else failed, let's start over.
+    reinitialize(repo)
+}
+
+fn reinitialize(repo: &mut git2::Repository) -> CargoResult<()> {
+    // Here we want to drop the current repository object pointed to by `repo`,
+    // so we initialize a temporary repository in a sub-folder, blow away the
+    // existing git folder, and then recreate the git repo. Finally we blow away
+    // the `tmp` folder we allocated.
+    let path = repo.path().to_path_buf();
+    debug!("reinitializing git repo at {:?}", path);
+    let tmp = path.join("tmp");
+    let bare = !repo.path().ends_with(".git");
+    *repo = init(&tmp, false)?;
+    for entry in path.read_dir()? {
+        let entry = entry?;
+        if entry.file_name().to_str() == Some("tmp") {
+            continue;
+        }
+        let path = entry.path();
+        drop(paths::remove_file(&path).or_else(|_| paths::remove_dir_all(&path)));
+    }
+    *repo = init(&path, bare)?;
+    paths::remove_dir_all(&tmp)?;
+    Ok(())
+}
+
+fn init(path: &Path, bare: bool) -> CargoResult<git2::Repository> {
+    let mut opts = git2::RepositoryInitOptions::new();
+    // Skip anything related to templates; they just cause all sorts of issues,
+    // as we really don't want to use them yet they insist on being used. See
+    // #6240 for an example issue that comes up.
+    opts.external_template(false);
+    opts.bare(bare);
+    Ok(git2::Repository::init_opts(&path, &opts)?)
+}
+
+/// Updating the index is done pretty regularly so we want it to be as fast as
+/// possible. For registries hosted on GitHub (like the crates.io index) there's
+/// a fast path available to use [1] to tell us whether there are any updates to
+/// be made.
+///
+/// This function will attempt to hit that fast path and verify that the `oid`
+/// is actually the current tip of the `master` branch of the repository. If
+/// `true` is returned then no update needs to be performed, but if `false` is
+/// returned then the standard update logic still needs to happen.
+///
+/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
+///
+/// Note that this function should never cause an actual failure because it's
+/// just a fast path. As a result all errors are ignored in this function and we
+/// just return a `bool`. Any real errors will be reported through the normal
+/// update path above.
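+///
+/// As a sketch, the fast path amounts to a single conditional HTTP request
+/// (the real request is assembled with curl in the function below):
+///
+/// ```text
+/// GET /repos/<user>/<repo>/commits/master
+/// Accept: application/vnd.github.3.sha
+/// If-None-Match: "<oid>"
+/// ```
+///
+/// A `304 Not Modified` response means `oid` is still the tip of `master`,
+/// so the fetch can be skipped entirely.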
+fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool { + macro_rules! r#try { + ($e:expr) => { + match $e { + Some(e) => e, + None => return false, + } + }; + } + + // This expects GitHub urls in the form `github.com/user/repo` and nothing + // else + let mut pieces = r#try!(url.path_segments()); + let username = r#try!(pieces.next()); + let repo = r#try!(pieces.next()); + if pieces.next().is_some() { + return false; + } + + let url = format!( + "https://api.github.com/repos/{}/{}/commits/master", + username, repo + ); + r#try!(handle.get(true).ok()); + r#try!(handle.url(&url).ok()); + r#try!(handle.useragent("cargo").ok()); + let mut headers = List::new(); + r#try!(headers.append("Accept: application/vnd.github.3.sha").ok()); + r#try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok()); + r#try!(handle.http_headers(headers).ok()); + r#try!(handle.perform().ok()); + + r#try!(handle.response_code().ok()) == 304 +} diff --git a/src/cargo/sources/mod.rs b/src/cargo/sources/mod.rs new file mode 100644 index 000000000..d96a05639 --- /dev/null +++ b/src/cargo/sources/mod.rs @@ -0,0 +1,13 @@ +pub use self::config::SourceConfigMap; +pub use self::directory::DirectorySource; +pub use self::git::GitSource; +pub use self::path::PathSource; +pub use self::registry::{RegistrySource, CRATES_IO_INDEX, CRATES_IO_REGISTRY}; +pub use self::replaced::ReplacedSource; + +pub mod config; +pub mod directory; +pub mod git; +pub mod path; +pub mod registry; +pub mod replaced; diff --git a/src/cargo/sources/path.rs b/src/cargo/sources/path.rs new file mode 100644 index 000000000..fd2e4bb5e --- /dev/null +++ b/src/cargo/sources/path.rs @@ -0,0 +1,577 @@ +use std::fmt::{self, Debug, Formatter}; +use std::fs; +use std::path::{Path, PathBuf}; + +use filetime::FileTime; +use glob::Pattern; +use ignore::gitignore::GitignoreBuilder; +use ignore::Match; +use log::{trace, warn}; + +use crate::core::source::MaybePackage; +use crate::core::{Dependency, Package, PackageId, Source, SourceId, Summary}; +use crate::ops; +use crate::util::paths; +use crate::util::Config; +use crate::util::{internal, CargoResult}; + +pub struct PathSource<'cfg> { + source_id: SourceId, + path: PathBuf, + updated: bool, + packages: Vec, + config: &'cfg Config, + recursive: bool, +} + +impl<'cfg> PathSource<'cfg> { + /// Invoked with an absolute path to a directory that contains a `Cargo.toml`. + /// + /// This source will only return the package at precisely the `path` + /// specified, and it will be an error if there's not a package at `path`. + pub fn new(path: &Path, source_id: SourceId, config: &'cfg Config) -> PathSource<'cfg> { + PathSource { + source_id, + path: path.to_path_buf(), + updated: false, + packages: Vec::new(), + config, + recursive: false, + } + } + + /// Creates a new source which is walked recursively to discover packages. + /// + /// This is similar to the `new` method except that instead of requiring a + /// valid package to be present at `root` the folder is walked entirely to + /// crawl for packages. + /// + /// Note that this should be used with care and likely shouldn't be chosen + /// by default! 
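+    ///
+    /// As an illustrative example, given a layout of `root/a/Cargo.toml` and
+    /// `root/b/Cargo.toml` with no `root/Cargo.toml`, a plain
+    /// `PathSource::new(root, ..)` errors when reading packages, whereas
+    /// `new_recursive` walks the tree and discovers both member packages.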
+ pub fn new_recursive(root: &Path, id: SourceId, config: &'cfg Config) -> PathSource<'cfg> { + PathSource { + recursive: true, + ..PathSource::new(root, id, config) + } + } + + pub fn preload_with(&mut self, pkg: Package) { + assert!(!self.updated); + assert!(!self.recursive); + assert!(self.packages.is_empty()); + self.updated = true; + self.packages.push(pkg); + } + + pub fn root_package(&mut self) -> CargoResult { + trace!("root_package; source={:?}", self); + + self.update()?; + + match self.packages.iter().find(|p| p.root() == &*self.path) { + Some(pkg) => Ok(pkg.clone()), + None => Err(internal("no package found in source")), + } + } + + pub fn read_packages(&self) -> CargoResult> { + if self.updated { + Ok(self.packages.clone()) + } else if self.recursive { + ops::read_packages(&self.path, self.source_id, self.config) + } else { + let path = self.path.join("Cargo.toml"); + let (pkg, _) = ops::read_package(&path, self.source_id, self.config)?; + Ok(vec![pkg]) + } + } + + /// List all files relevant to building this package inside this source. + /// + /// This function will use the appropriate methods to determine the + /// set of files underneath this source's directory which are relevant for + /// building `pkg`. + /// + /// The basic assumption of this method is that all files in the directory + /// are relevant for building this package, but it also contains logic to + /// use other methods like .gitignore to filter the list of files. + /// + /// ## Pattern matching strategy + /// + /// Migrating from a glob-like pattern matching (using `glob` crate) to a + /// gitignore-like pattern matching (using `ignore` crate). The migration + /// stages are: + /// + /// 1) Only warn users about the future change iff their matching rules are + /// affected. (CURRENT STAGE) + /// + /// 2) Switch to the new strategy and update documents. Still keep warning + /// affected users. + /// + /// 3) Drop the old strategy and no more warnings. + /// + /// See rust-lang/cargo#4268 for more info. + pub fn list_files(&self, pkg: &Package) -> CargoResult> { + let root = pkg.root(); + let no_include_option = pkg.manifest().include().is_empty(); + + // Glob-like matching rules. + + let glob_parse = |p: &String| { + let pattern: &str = if p.starts_with('/') { + &p[1..p.len()] + } else { + p + }; + Pattern::new(pattern) + .map_err(|e| failure::format_err!("could not parse glob pattern `{}`: {}", p, e)) + }; + + let glob_exclude = pkg + .manifest() + .exclude() + .iter() + .map(|p| glob_parse(p)) + .collect::, _>>()?; + + let glob_include = pkg + .manifest() + .include() + .iter() + .map(|p| glob_parse(p)) + .collect::, _>>()?; + + let glob_should_package = |relative_path: &Path| -> bool { + fn glob_match(patterns: &[Pattern], relative_path: &Path) -> bool { + patterns + .iter() + .any(|pattern| pattern.matches_path(relative_path)) + } + + // "Include" and "exclude" options are mutually exclusive. + if no_include_option { + !glob_match(&glob_exclude, relative_path) + } else { + glob_match(&glob_include, relative_path) + } + }; + + // Ignore-like matching rules. 
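+        // As a concrete example of how the two strategies diverge
+        // (illustrative): with `exclude = ["tests"]`, the glob rules above
+        // only match a path named exactly `tests`, so `tests/foo.rs` is still
+        // packaged, while the gitignore rules below treat `tests` as ignoring
+        // the whole directory, excluding `tests/foo.rs` as well. Divergences
+        // like this are exactly what the warnings further down report.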
+ + let mut exclude_builder = GitignoreBuilder::new(root); + for rule in pkg.manifest().exclude() { + exclude_builder.add_line(None, rule)?; + } + let ignore_exclude = exclude_builder.build()?; + + let mut include_builder = GitignoreBuilder::new(root); + for rule in pkg.manifest().include() { + include_builder.add_line(None, rule)?; + } + let ignore_include = include_builder.build()?; + + let ignore_should_package = |relative_path: &Path| -> CargoResult { + // "Include" and "exclude" options are mutually exclusive. + if no_include_option { + match ignore_exclude + .matched_path_or_any_parents(relative_path, /* is_dir */ false) + { + Match::None => Ok(true), + Match::Ignore(_) => Ok(false), + Match::Whitelist(pattern) => Err(failure::format_err!( + "exclude rules cannot start with `!`: {}", + pattern.original() + )), + } + } else { + match ignore_include + .matched_path_or_any_parents(relative_path, /* is_dir */ false) + { + Match::None => Ok(false), + Match::Ignore(_) => Ok(true), + Match::Whitelist(pattern) => Err(failure::format_err!( + "include rules cannot start with `!`: {}", + pattern.original() + )), + } + } + }; + + // Matching to paths. + + let mut filter = |path: &Path| -> CargoResult { + let relative_path = path.strip_prefix(root)?; + let glob_should_package = glob_should_package(relative_path); + let ignore_should_package = ignore_should_package(relative_path)?; + + if glob_should_package != ignore_should_package { + if glob_should_package { + if no_include_option { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL be excluded in a future Cargo version.\n\ + See for more \ + information.", + relative_path.display() + ))?; + } else { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL NOT be included in a future Cargo version.\n\ + See for more \ + information.", + relative_path.display() + ))?; + } + } else if no_include_option { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL NOT be excluded in a future Cargo version.\n\ + See for more \ + information.", + relative_path.display() + ))?; + } else { + self.config.shell().warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL be included in a future Cargo version.\n\ + See for more \ + information.", + relative_path.display() + ))?; + } + } + + // Update to `ignore_should_package` for Stage 2. + Ok(glob_should_package) + }; + + // Attempt Git-prepopulate only if no `include` (see rust-lang/cargo#4135). + if no_include_option { + if let Some(result) = self.discover_git_and_list_files(pkg, root, &mut filter) { + return result; + } + } + self.list_files_walk(pkg, &mut filter) + } + + // Returns `Some(_)` if found sibling `Cargo.toml` and `.git` directory; + // otherwise, caller should fall back on full file list. + fn discover_git_and_list_files( + &self, + pkg: &Package, + root: &Path, + filter: &mut dyn FnMut(&Path) -> CargoResult, + ) -> Option>> { + // If this package is in a Git repository, then we really do want to + // query the Git repository as it takes into account items such as + // `.gitignore`. We're not quite sure where the Git repository is, + // however, so we do a bit of a probe. + // + // We walk this package's path upwards and look for a sibling + // `Cargo.toml` and `.git` directory. 
If we find one then we assume that + // we're part of that repository. + let mut cur = root; + loop { + if cur.join("Cargo.toml").is_file() { + // If we find a Git repository next to this `Cargo.toml`, we still + // check to see if we are indeed part of the index. If not, then + // this is likely an unrelated Git repo, so keep going. + if let Ok(repo) = git2::Repository::open(cur) { + let index = match repo.index() { + Ok(index) => index, + Err(err) => return Some(Err(err.into())), + }; + let path = root.strip_prefix(cur).unwrap().join("Cargo.toml"); + if index.get_path(&path, 0).is_some() { + return Some(self.list_files_git(pkg, &repo, filter)); + } + } + } + // Don't cross submodule boundaries. + if cur.join(".git").is_dir() { + break; + } + match cur.parent() { + Some(parent) => cur = parent, + None => break, + } + } + None + } + + fn list_files_git( + &self, + pkg: &Package, + repo: &git2::Repository, + filter: &mut dyn FnMut(&Path) -> CargoResult, + ) -> CargoResult> { + warn!("list_files_git {}", pkg.package_id()); + let index = repo.index()?; + let root = repo + .workdir() + .ok_or_else(|| internal("Can't list files on a bare repository."))?; + let pkg_path = pkg.root(); + + let mut ret = Vec::::new(); + + // We use information from the Git repository to guide us in traversing + // its tree. The primary purpose of this is to take advantage of the + // `.gitignore` and auto-ignore files that don't matter. + // + // Here we're also careful to look at both tracked and untracked files as + // the untracked files are often part of a build and may become relevant + // as part of a future commit. + let index_files = index.iter().map(|entry| { + use libgit2_sys::GIT_FILEMODE_COMMIT; + let is_dir = entry.mode == GIT_FILEMODE_COMMIT as u32; + (join(root, &entry.path), Some(is_dir)) + }); + let mut opts = git2::StatusOptions::new(); + opts.include_untracked(true); + if let Ok(suffix) = pkg_path.strip_prefix(root) { + opts.pathspec(suffix); + } + let statuses = repo.statuses(Some(&mut opts))?; + let untracked = statuses.iter().filter_map(|entry| match entry.status() { + git2::Status::WT_NEW => Some((join(root, entry.path_bytes()), None)), + _ => None, + }); + + let mut subpackages_found = Vec::new(); + + for (file_path, is_dir) in index_files.chain(untracked) { + let file_path = file_path?; + + // Filter out files blatantly outside this package. This is helped a + // bit obove via the `pathspec` function call, but we need to filter + // the entries in the index as well. + if !file_path.starts_with(pkg_path) { + continue; + } + + match file_path.file_name().and_then(|s| s.to_str()) { + // Filter out `Cargo.lock` and `target` always; we don't want to + // package a lock file no one will ever read and we also avoid + // build artifacts. + Some("Cargo.lock") | Some("target") => continue, + + // Keep track of all sub-packages found and also strip out all + // matches we've found so far. Note, though, that if we find + // our own `Cargo.toml`, we keep going. + Some("Cargo.toml") => { + let path = file_path.parent().unwrap(); + if path != pkg_path { + warn!("subpackage found: {}", path.display()); + ret.retain(|p| !p.starts_with(path)); + subpackages_found.push(path.to_path_buf()); + continue; + } + } + + _ => {} + } + + // If this file is part of any other sub-package we've found so far, + // skip it. 
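+            // (Pruning sub-packages this way ensures that, for example, a
+            // nested crate discovered via its own `Cargo.toml` above doesn't
+            // get its sources bundled into the parent package's file list.)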
+ if subpackages_found.iter().any(|p| file_path.starts_with(p)) { + continue; + } + + if is_dir.unwrap_or_else(|| file_path.is_dir()) { + warn!(" found submodule {}", file_path.display()); + let rel = file_path.strip_prefix(root)?; + let rel = rel.to_str().ok_or_else(|| { + failure::format_err!("invalid utf-8 filename: {}", rel.display()) + })?; + // Git submodules are currently only named through `/` path + // separators, explicitly not `\` which windows uses. Who knew? + let rel = rel.replace(r"\", "/"); + match repo.find_submodule(&rel).and_then(|s| s.open()) { + Ok(repo) => { + let files = self.list_files_git(pkg, &repo, filter)?; + ret.extend(files.into_iter()); + } + Err(..) => { + PathSource::walk(&file_path, &mut ret, false, filter)?; + } + } + } else if (*filter)(&file_path)? { + // We found a file! + warn!(" found {}", file_path.display()); + ret.push(file_path); + } + } + return Ok(ret); + + #[cfg(unix)] + fn join(path: &Path, data: &[u8]) -> CargoResult { + use std::ffi::OsStr; + use std::os::unix::prelude::*; + Ok(path.join(::from_bytes(data))) + } + #[cfg(windows)] + fn join(path: &Path, data: &[u8]) -> CargoResult { + use std::str; + match str::from_utf8(data) { + Ok(s) => Ok(path.join(s)), + Err(..) => Err(internal( + "cannot process path in git with a non \ + unicode filename", + )), + } + } + } + + fn list_files_walk( + &self, + pkg: &Package, + filter: &mut dyn FnMut(&Path) -> CargoResult, + ) -> CargoResult> { + let mut ret = Vec::new(); + PathSource::walk(pkg.root(), &mut ret, true, filter)?; + Ok(ret) + } + + fn walk( + path: &Path, + ret: &mut Vec, + is_root: bool, + filter: &mut dyn FnMut(&Path) -> CargoResult, + ) -> CargoResult<()> { + if !fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false) { + if (*filter)(path)? { + ret.push(path.to_path_buf()); + } + return Ok(()); + } + // Don't recurse into any sub-packages that we have. + if !is_root && fs::metadata(&path.join("Cargo.toml")).is_ok() { + return Ok(()); + } + + // For package integration tests, we need to sort the paths in a deterministic order to + // be able to match stdout warnings in the same order. + // + // TODO: drop `collect` and sort after transition period and dropping warning tests. + // See rust-lang/cargo#4268 and rust-lang/cargo#4270. + let mut entries: Vec = fs::read_dir(path)?.map(|e| e.unwrap().path()).collect(); + entries.sort_unstable_by(|a, b| a.as_os_str().cmp(b.as_os_str())); + for path in entries { + let name = path.file_name().and_then(|s| s.to_str()); + // Skip dotfile directories. + if name.map(|s| s.starts_with('.')) == Some(true) { + continue; + } + if is_root { + // Skip Cargo artifacts. + match name { + Some("target") | Some("Cargo.lock") => continue, + _ => {} + } + } + PathSource::walk(&path, ret, false, filter)?; + } + Ok(()) + } + + pub fn last_modified_file(&self, pkg: &Package) -> CargoResult<(FileTime, PathBuf)> { + if !self.updated { + return Err(internal("BUG: source was not updated")); + } + + let mut max = FileTime::zero(); + let mut max_path = PathBuf::new(); + for file in self.list_files(pkg)? { + // An `fs::stat` error here is either because path is a + // broken symlink, a permissions error, or a race + // condition where this path was `rm`-ed -- either way, + // we can ignore the error and treat the path's `mtime` + // as `0`. 
+ let mtime = paths::mtime(&file).unwrap_or_else(|_| FileTime::zero()); + if mtime > max { + max = mtime; + max_path = file; + } + } + trace!("last modified file {}: {}", self.path.display(), max); + Ok((max, max_path)) + } + + pub fn path(&self) -> &Path { + &self.path + } +} + +impl<'cfg> Debug for PathSource<'cfg> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "the paths source") + } +} + +impl<'cfg> Source for PathSource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + for s in self.packages.iter().map(|p| p.summary()) { + if dep.matches(s) { + f(s.clone()) + } + } + Ok(()) + } + + fn fuzzy_query(&mut self, _dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + for s in self.packages.iter().map(|p| p.summary()) { + f(s.clone()) + } + Ok(()) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + false + } + + fn source_id(&self) -> SourceId { + self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + if !self.updated { + let packages = self.read_packages()?; + self.packages.extend(packages.into_iter()); + self.updated = true; + } + + Ok(()) + } + + fn download(&mut self, id: PackageId) -> CargoResult { + trace!("getting packages; id={}", id); + + let pkg = self.packages.iter().find(|pkg| pkg.package_id() == id); + pkg.cloned() + .map(MaybePackage::Ready) + .ok_or_else(|| internal(format!("failed to find {} in path source", id))) + } + + fn finish_download(&mut self, _id: PackageId, _data: Vec) -> CargoResult { + panic!("no download should have started") + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + let (max, max_path) = self.last_modified_file(pkg)?; + Ok(format!("{} ({})", max, max_path.display())) + } + + fn describe(&self) -> String { + match self.source_id.url().to_file_path() { + Ok(path) => path.display().to_string(), + Err(_) => self.source_id.to_string(), + } + } + + fn add_to_yanked_whitelist(&mut self, _pkgs: &[PackageId]) {} +} diff --git a/src/cargo/sources/registry/index.rs b/src/cargo/sources/registry/index.rs new file mode 100644 index 000000000..a593fcc53 --- /dev/null +++ b/src/cargo/sources/registry/index.rs @@ -0,0 +1,316 @@ +use std::collections::{HashMap, HashSet}; +use std::path::Path; +use std::str; + +use log::{info, trace}; +use semver::Version; + +use crate::core::dependency::Dependency; +use crate::core::{PackageId, SourceId, Summary}; +use crate::sources::registry::RegistryData; +use crate::sources::registry::{RegistryPackage, INDEX_LOCK}; +use crate::util::{internal, CargoResult, Config, Filesystem, ToSemver}; + +/// Crates.io treats hyphen and underscores as interchangeable, but the index and old Cargo do not. +/// Therefore, the index must store uncanonicalized version of the name so old Cargo's can find it. +/// This loop tries all possible combinations of switching hyphen and underscores to find the +/// uncanonicalized one. As all stored inputs have the correct spelling, we start with the spelling +/// as-provided. 
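+/// For example, `foo_bar` yields `foo_bar` and then `foo-bar`; in general a
+/// name with `k` hyphens/underscores yields `2^k` candidates, which is why the
+/// caller bounds the iteration (see the `take(1024)` in `load_summaries`).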
+struct UncanonicalizedIter<'s> {
+    input: &'s str,
+    num_hyphen_underscore: u32,
+    hyphen_combination_num: u16,
+}
+
+impl<'s> UncanonicalizedIter<'s> {
+    fn new(input: &'s str) -> Self {
+        let num_hyphen_underscore = input.chars().filter(|&c| c == '_' || c == '-').count() as u32;
+        UncanonicalizedIter {
+            input,
+            num_hyphen_underscore,
+            hyphen_combination_num: 0,
+        }
+    }
+}
+
+impl<'s> Iterator for UncanonicalizedIter<'s> {
+    type Item = String;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.hyphen_combination_num > 0
+            && self.hyphen_combination_num.trailing_zeros() >= self.num_hyphen_underscore
+        {
+            return None;
+        }
+
+        let ret = Some(
+            self.input
+                .chars()
+                .scan(0u16, |s, c| {
+                    // The check against 15 here is to prevent shift overflow
+                    // on inputs with more than 15 hyphens.
+                    if (c == '_' || c == '-') && *s <= 15 {
+                        let switch = (self.hyphen_combination_num & (1u16 << *s)) > 0;
+                        let out = if (c == '_') ^ switch { '_' } else { '-' };
+                        *s += 1;
+                        Some(out)
+                    } else {
+                        Some(c)
+                    }
+                })
+                .collect(),
+        );
+        self.hyphen_combination_num += 1;
+        ret
+    }
+}
+
+#[test]
+fn no_hyphen() {
+    assert_eq!(
+        UncanonicalizedIter::new("test").collect::<Vec<_>>(),
+        vec!["test".to_string()]
+    )
+}
+
+#[test]
+fn two_hyphen() {
+    assert_eq!(
+        UncanonicalizedIter::new("te-_st").collect::<Vec<_>>(),
+        vec![
+            "te-_st".to_string(),
+            "te__st".to_string(),
+            "te--st".to_string(),
+            "te_-st".to_string()
+        ]
+    )
+}
+
+#[test]
+fn overflow_hyphen() {
+    assert_eq!(
+        UncanonicalizedIter::new("te-_-_-_-_-_-_-_-_-st")
+            .take(100)
+            .count(),
+        100
+    )
+}
+
+pub struct RegistryIndex<'cfg> {
+    source_id: SourceId,
+    path: Filesystem,
+    cache: HashMap<&'static str, Vec<(Summary, bool)>>,
+    // `(name, vers)` -> `checksum`
+    hashes: HashMap<&'static str, HashMap<Version, String>>,
+    config: &'cfg Config,
+    locked: bool,
+}
+
+impl<'cfg> RegistryIndex<'cfg> {
+    pub fn new(
+        source_id: SourceId,
+        path: &Filesystem,
+        config: &'cfg Config,
+        locked: bool,
+    ) -> RegistryIndex<'cfg> {
+        RegistryIndex {
+            source_id,
+            path: path.clone(),
+            cache: HashMap::new(),
+            hashes: HashMap::new(),
+            config,
+            locked,
+        }
+    }
+
+    /// Returns the hash listed for a specified `PackageId`.
+    pub fn hash(&mut self, pkg: PackageId, load: &mut dyn RegistryData) -> CargoResult<String> {
+        let name = pkg.name().as_str();
+        let version = pkg.version();
+        if let Some(s) = self.hashes.get(name).and_then(|v| v.get(version)) {
+            return Ok(s.clone());
+        }
+        // Ok, we're missing the key, so parse the index file to load it.
+        self.summaries(name, load)?;
+        self.hashes
+            .get(name)
+            .and_then(|v| v.get(version))
+            .ok_or_else(|| internal(format!("no hash listed for {}", pkg)))
+            .map(|s| s.clone())
+    }
+
+    /// Parses the on-disk metadata for the package provided.
+    ///
+    /// Returns a list of pairs of `(summary, yanked)` for the package name specified.
+    pub fn summaries(
+        &mut self,
+        name: &'static str,
+        load: &mut dyn RegistryData,
+    ) -> CargoResult<&Vec<(Summary, bool)>> {
+        if self.cache.contains_key(name) {
+            return Ok(&self.cache[name]);
+        }
+        let summaries = self.load_summaries(name, load)?;
+        self.cache.insert(name, summaries);
+        Ok(&self.cache[name])
+    }
+
+    fn load_summaries(
+        &mut self,
+        name: &str,
+        load: &mut dyn RegistryData,
+    ) -> CargoResult<Vec<(Summary, bool)>> {
+        // Prepare the `RegistryData`, which will lazily initialize internal
+        // data structures. Doing this first is also important to avoid
+        // deadlocks where we acquire a lock below but the `load` function
+        // inside *also* wants to acquire a lock. See an instance of this in
+        // #5551.
+ load.prepare()?; + let (root, _lock) = if self.locked { + let lock = self + .path + .open_ro(Path::new(INDEX_LOCK), self.config, "the registry index"); + match lock { + Ok(lock) => (lock.path().parent().unwrap().to_path_buf(), Some(lock)), + Err(_) => return Ok(Vec::new()), + } + } else { + (self.path.clone().into_path_unlocked(), None) + }; + + let fs_name = name + .chars() + .flat_map(|c| c.to_lowercase()) + .collect::(); + + // See module comment for why this is structured the way it is. + let raw_path = match fs_name.len() { + 1 => format!("1/{}", fs_name), + 2 => format!("2/{}", fs_name), + 3 => format!("3/{}/{}", &fs_name[..1], fs_name), + _ => format!("{}/{}/{}", &fs_name[0..2], &fs_name[2..4], fs_name), + }; + let mut ret = Vec::new(); + for path in UncanonicalizedIter::new(&raw_path).take(1024) { + let mut hit_closure = false; + let err = load.load(&root, Path::new(&path), &mut |contents| { + hit_closure = true; + let contents = str::from_utf8(contents) + .map_err(|_| failure::format_err!("registry index file was not valid utf-8"))?; + ret.reserve(contents.lines().count()); + let lines = contents.lines().map(|s| s.trim()).filter(|l| !l.is_empty()); + + let online = !self.config.cli_unstable().offline; + // Attempt forwards-compatibility on the index by ignoring + // everything that we ourselves don't understand, that should + // allow future cargo implementations to break the + // interpretation of each line here and older cargo will simply + // ignore the new lines. + ret.extend(lines.filter_map(|line| { + let (summary, locked) = match self.parse_registry_package(line) { + Ok(p) => p, + Err(e) => { + info!("failed to parse `{}` registry package: {}", name, e); + trace!("line: {}", line); + return None; + } + }; + if online || load.is_crate_downloaded(summary.package_id()) { + Some((summary, locked)) + } else { + None + } + })); + + Ok(()) + }); + + // We ignore lookup failures as those are just crates which don't exist + // or we haven't updated the registry yet. If we actually ran the + // closure though then we care about those errors. + if hit_closure { + err?; + // Crates.io ensures that there is only one hyphen and underscore equivalent + // result in the index so return when we find it. + return Ok(ret); + } + } + + Ok(ret) + } + + /// Parses a line from the registry's index file into a `Summary` for a package. + /// + /// The returned boolean is whether or not the summary has been yanked. 
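+    ///
+    /// For reference, an index line looks roughly like this (abbreviated
+    /// checksum, illustrative values):
+    ///
+    /// ```json
+    /// {"name":"foo","vers":"0.1.0","deps":[],"cksum":"d867...","features":{},"yanked":false}
+    /// ```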
+ fn parse_registry_package(&mut self, line: &str) -> CargoResult<(Summary, bool)> { + let RegistryPackage { + name, + vers, + cksum, + deps, + features, + yanked, + links, + } = serde_json::from_str(line)?; + let pkgid = PackageId::new(&name, &vers, self.source_id)?; + let name = pkgid.name(); + let deps = deps + .into_iter() + .map(|dep| dep.into_dep(self.source_id)) + .collect::>>()?; + let summary = Summary::new(pkgid, deps, &features, links, false)?; + let summary = summary.set_checksum(cksum.clone()); + self.hashes + .entry(name.as_str()) + .or_insert_with(HashMap::new) + .insert(vers, cksum); + Ok((summary, yanked.unwrap_or(false))) + } + + pub fn query_inner( + &mut self, + dep: &Dependency, + load: &mut dyn RegistryData, + yanked_whitelist: &HashSet, + f: &mut dyn FnMut(Summary), + ) -> CargoResult<()> { + let source_id = self.source_id; + let name = dep.package_name().as_str(); + let summaries = self.summaries(name, load)?; + let summaries = summaries + .iter() + .filter(|&(summary, yanked)| { + !yanked || { + log::debug!("{:?}", yanked_whitelist); + log::debug!("{:?}", summary.package_id()); + yanked_whitelist.contains(&summary.package_id()) + } + }) + .map(|s| s.0.clone()); + + // Handle `cargo update --precise` here. If specified, our own source + // will have a precise version listed of the form + // `=o->` where `` is the name of a crate on + // this source, `` is the version installed and ` is the + // version requested (argument to `--precise`). + let summaries = summaries.filter(|s| match source_id.precise() { + Some(p) if p.starts_with(name) && p[name.len()..].starts_with('=') => { + let mut vers = p[name.len() + 1..].splitn(2, "->"); + if dep + .version_req() + .matches(&vers.next().unwrap().to_semver().unwrap()) + { + vers.next().unwrap() == s.version().to_string() + } else { + true + } + } + _ => true, + }); + + for summary in summaries { + f(summary); + } + Ok(()) + } +} diff --git a/src/cargo/sources/registry/local.rs b/src/cargo/sources/registry/local.rs new file mode 100644 index 000000000..ed649c9e5 --- /dev/null +++ b/src/cargo/sources/registry/local.rs @@ -0,0 +1,115 @@ +use std::io::prelude::*; +use std::io::SeekFrom; +use std::path::Path; + +use crate::core::PackageId; +use crate::sources::registry::{MaybeLock, RegistryConfig, RegistryData}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::paths; +use crate::util::{Config, FileLock, Filesystem, Sha256}; +use hex; + +pub struct LocalRegistry<'cfg> { + index_path: Filesystem, + root: Filesystem, + src_path: Filesystem, + config: &'cfg Config, +} + +impl<'cfg> LocalRegistry<'cfg> { + pub fn new(root: &Path, config: &'cfg Config, name: &str) -> LocalRegistry<'cfg> { + LocalRegistry { + src_path: config.registry_source_path().join(name), + index_path: Filesystem::new(root.join("index")), + root: Filesystem::new(root.to_path_buf()), + config, + } + } +} + +impl<'cfg> RegistryData for LocalRegistry<'cfg> { + fn prepare(&self) -> CargoResult<()> { + Ok(()) + } + + fn index_path(&self) -> &Filesystem { + &self.index_path + } + + fn load( + &self, + root: &Path, + path: &Path, + data: &mut dyn FnMut(&[u8]) -> CargoResult<()>, + ) -> CargoResult<()> { + data(&paths::read_bytes(&root.join(path))?) + } + + fn config(&mut self) -> CargoResult> { + // Local registries don't have configuration for remote APIs or anything + // like that + Ok(None) + } + + fn update_index(&mut self) -> CargoResult<()> { + // Nothing to update, we just use what's on disk. Verify it actually + // exists though. 
We don't use any locks as we're just checking whether + // these directories exist. + let root = self.root.clone().into_path_unlocked(); + if !root.is_dir() { + failure::bail!("local registry path is not a directory: {}", root.display()) + } + let index_path = self.index_path.clone().into_path_unlocked(); + if !index_path.is_dir() { + failure::bail!( + "local registry index path is not a directory: {}", + index_path.display() + ) + } + Ok(()) + } + + fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult { + let crate_file = format!("{}-{}.crate", pkg.name(), pkg.version()); + let mut crate_file = self.root.open_ro(&crate_file, self.config, "crate file")?; + + // If we've already got an unpacked version of this crate, then skip the + // checksum below as it is in theory already verified. + let dst = format!("{}-{}", pkg.name(), pkg.version()); + if self.src_path.join(dst).into_path_unlocked().exists() { + return Ok(MaybeLock::Ready(crate_file)); + } + + self.config.shell().status("Unpacking", pkg)?; + + // We don't actually need to download anything per-se, we just need to + // verify the checksum matches the .crate file itself. + let mut state = Sha256::new(); + let mut buf = [0; 64 * 1024]; + loop { + let n = crate_file + .read(&mut buf) + .chain_err(|| format!("failed to read `{}`", crate_file.path().display()))?; + if n == 0 { + break; + } + state.update(&buf[..n]); + } + if hex::encode(state.finish()) != checksum { + failure::bail!("failed to verify the checksum of `{}`", pkg) + } + + crate_file.seek(SeekFrom::Start(0))?; + + Ok(MaybeLock::Ready(crate_file)) + } + + fn finish_download( + &mut self, + _pkg: PackageId, + _checksum: &str, + _data: &[u8], + ) -> CargoResult { + panic!("this source doesn't download") + } +} diff --git a/src/cargo/sources/registry/mod.rs b/src/cargo/sources/registry/mod.rs new file mode 100644 index 000000000..86dd6f2cf --- /dev/null +++ b/src/cargo/sources/registry/mod.rs @@ -0,0 +1,637 @@ +//! A `Source` for registry-based packages. +//! +//! # What's a Registry? +//! +//! Registries are central locations where packages can be uploaded to, +//! discovered, and searched for. The purpose of a registry is to have a +//! location that serves as permanent storage for versions of a crate over time. +//! +//! Compared to git sources, a registry provides many packages as well as many +//! versions simultaneously. Git sources can also have commits deleted through +//! rebasings where registries cannot have their versions deleted. +//! +//! # The Index of a Registry +//! +//! One of the major difficulties with a registry is that hosting so many +//! packages may quickly run into performance problems when dealing with +//! dependency graphs. It's infeasible for cargo to download the entire contents +//! of the registry just to resolve one package's dependencies, for example. As +//! a result, cargo needs some efficient method of querying what packages are +//! available on a registry, what versions are available, and what the +//! dependencies for each version is. +//! +//! One method of doing so would be having the registry expose an HTTP endpoint +//! which can be queried with a list of packages and a response of their +//! dependencies and versions is returned. This is somewhat inefficient however +//! as we may have to hit the endpoint many times and we may have already +//! queried for much of the data locally already (for other packages, for +//! example). This also involves inventing a transport format between the +//! 
registry and Cargo itself, so this route was not taken. +//! +//! Instead, Cargo communicates with registries through a git repository +//! referred to as the Index. The Index of a registry is essentially an easily +//! query-able version of the registry's database for a list of versions of a +//! package as well as a list of dependencies for each version. +//! +//! Using git to host this index provides a number of benefits: +//! +//! * The entire index can be stored efficiently locally on disk. This means +//! that all queries of a registry can happen locally and don't need to touch +//! the network. +//! +//! * Updates of the index are quite efficient. Using git buys incremental +//! updates, compressed transmission, etc for free. The index must be updated +//! each time we need fresh information from a registry, but this is one +//! update of a git repository that probably hasn't changed a whole lot so +//! it shouldn't be too expensive. +//! +//! Additionally, each modification to the index is just appending a line at +//! the end of a file (the exact format is described later). This means that +//! the commits for an index are quite small and easily applied/compressable. +//! +//! ## The format of the Index +//! +//! The index is a store for the list of versions for all packages known, so its +//! format on disk is optimized slightly to ensure that `ls registry` doesn't +//! produce a list of all packages ever known. The index also wants to ensure +//! that there's not a million files which may actually end up hitting +//! filesystem limits at some point. To this end, a few decisions were made +//! about the format of the registry: +//! +//! 1. Each crate will have one file corresponding to it. Each version for a +//! crate will just be a line in this file. +//! 2. There will be two tiers of directories for crate names, under which +//! crates corresponding to those tiers will be located. +//! +//! As an example, this is an example hierarchy of an index: +//! +//! ```notrust +//! . +//! ├── 3 +//! │   └── u +//! │   └── url +//! ├── bz +//! │   └── ip +//! │   └── bzip2 +//! ├── config.json +//! ├── en +//! │   └── co +//! │   └── encoding +//! └── li +//!    ├── bg +//!    │   └── libgit2 +//!    └── nk +//!    └── link-config +//! ``` +//! +//! The root of the index contains a `config.json` file with a few entries +//! corresponding to the registry (see `RegistryConfig` below). +//! +//! Otherwise, there are three numbered directories (1, 2, 3) for crates with +//! names 1, 2, and 3 characters in length. The 1/2 directories simply have the +//! crate files underneath them, while the 3 directory is sharded by the first +//! letter of the crate name. +//! +//! Otherwise the top-level directory contains many two-letter directory names, +//! each of which has many sub-folders with two letters. At the end of all these +//! are the actual crate files themselves. +//! +//! The purpose of this layout is to hopefully cut down on `ls` sizes as well as +//! efficient lookup based on the crate name itself. +//! +//! ## Crate files +//! +//! Each file in the index is the history of one crate over time. Each line in +//! the file corresponds to one version of a crate, stored in JSON format (see +//! the `RegistryPackage` structure below). +//! +//! As new versions are published, new lines are appended to this file. The only +//! modifications to this file that should happen over time are yanks of a +//! particular version. +//! +//! # Downloading Packages +//! +//! 
+//!
+//! # Downloading Packages
+//!
+//! The purpose of the Index was to provide an efficient method to resolve the
+//! dependency graph for a package. So far we only required one network
+//! interaction to update the registry's repository (yay!). After resolution has
+//! been performed, however, we need to download the contents of packages so we
+//! can read the full manifest and build the source code.
+//!
+//! To accomplish this, this source's `download` method will make an HTTP
+//! request per package requested to download tarballs into a local cache. These
+//! tarballs will then be unpacked into a destination folder.
+//!
+//! Note that because versions uploaded to the registry are frozen forever, the
+//! HTTP download and unpacking can all be skipped if the version has
+//! already been downloaded and unpacked. This caching allows us to only
+//! download a package when absolutely necessary.
+//!
+//! # Filesystem Hierarchy
+//!
+//! Overall, the `$HOME/.cargo` directory looks like this when talking about the
+//! registry:
+//!
+//! ```notrust
+//! # A folder under which all registry metadata is hosted (similar to
+//! # $HOME/.cargo/git)
+//! $HOME/.cargo/registry/
+//!
+//!     # For each registry that cargo knows about (keyed by hostname + hash)
+//!     # there is a folder which is the checked out version of the index for
+//!     # the registry in this location. Note that this is done so cargo can
+//!     # support multiple registries simultaneously
+//!     index/
+//!         registry1-<hash>/
+//!         registry2-<hash>/
+//!         ...
+//!
+//!     # This folder is a cache for all downloaded tarballs from a registry.
+//!     # Once downloaded and verified, a tarball never changes.
+//!     cache/
+//!         registry1-<hash>/<pkg>-<version>.crate
+//!         ...
+//!
+//!     # Location in which all tarballs are unpacked. Each tarball is known to
+//!     # be frozen after downloading, so transitively this folder is also
+//!     # frozen once it's unpacked (it's never unpacked again)
+//!     src/
+//!         registry1-<hash>/<pkg>-<version>/...
+//!         ...
+//! ```
+
+use std::borrow::Cow;
+use std::collections::BTreeMap;
+use std::collections::HashSet;
+use std::io::Write;
+use std::path::{Path, PathBuf};
+
+use flate2::read::GzDecoder;
+use log::debug;
+use semver::Version;
+use serde::Deserialize;
+use tar::Archive;
+
+use crate::core::dependency::{Dependency, Kind};
+use crate::core::source::MaybePackage;
+use crate::core::{Package, PackageId, Source, SourceId, Summary};
+use crate::sources::PathSource;
+use crate::util::errors::CargoResultExt;
+use crate::util::hex;
+use crate::util::to_url::ToUrl;
+use crate::util::{internal, CargoResult, Config, FileLock, Filesystem};
+
+const INDEX_LOCK: &str = ".cargo-index-lock";
+const PACKAGE_SOURCE_LOCK: &str = ".cargo-ok";
+pub const CRATES_IO_INDEX: &str = "https://github.com/rust-lang/crates.io-index";
+pub const CRATES_IO_REGISTRY: &str = "crates-io";
+const CRATE_TEMPLATE: &str = "{crate}";
+const VERSION_TEMPLATE: &str = "{version}";
+
+pub struct RegistrySource<'cfg> {
+    source_id: SourceId,
+    src_path: Filesystem,
+    config: &'cfg Config,
+    updated: bool,
+    ops: Box<dyn RegistryData + 'cfg>,
+    index: index::RegistryIndex<'cfg>,
+    yanked_whitelist: HashSet<PackageId>,
+    index_locked: bool,
+}
+
+#[derive(Deserialize)]
+pub struct RegistryConfig {
+    /// Download endpoint for all crates.
+    ///
+    /// The string is a template which will generate the download URL for the
+    /// tarball of a specific version of a crate. The substrings `{crate}` and
+    /// `{version}` will be replaced with the crate's name and version
+    /// respectively.
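+    ///
+    /// For example, with a hypothetical template of
+    /// `https://example.com/api/v1/crates/{crate}/{version}/download`,
+    /// version 1.0.0 of a crate named `foo` would be downloaded from
+    /// `https://example.com/api/v1/crates/foo/1.0.0/download`.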
+    ///
+    /// For backwards compatibility, if the string does not contain `{crate}` or
+    /// `{version}`, it will be extended with `/{crate}/{version}/download` to
+    /// support registries like crates.io which were created before the
+    /// templating setup was introduced.
+    pub dl: String,
+
+    /// API endpoint for the registry. This is what's actually hit to perform
+    /// operations like yanks, owner modifications, publish new crates, etc.
+    /// If this is None, the registry does not support API commands.
+    pub api: Option<String>,
+}
+
+#[derive(Deserialize)]
+pub struct RegistryPackage<'a> {
+    name: Cow<'a, str>,
+    vers: Version,
+    deps: Vec<RegistryDependency<'a>>,
+    features: BTreeMap<Cow<'a, str>, Vec<Cow<'a, str>>>,
+    cksum: String,
+    yanked: Option<bool>,
+    links: Option<Cow<'a, str>>,
+}
+
+#[test]
+fn escaped_char_in_json() {
+    let _: RegistryPackage<'_> = serde_json::from_str(
+        r#"{"name":"a","vers":"0.0.1","deps":[],"cksum":"bae3","features":{}}"#,
+    )
+    .unwrap();
+    let _: RegistryPackage<'_> = serde_json::from_str(
+        r#"{"name":"a","vers":"0.0.1","deps":[],"cksum":"bae3","features":{"test":["k","q"]},"links":"a-sys"}"#
+    ).unwrap();
+
+    // Now we add escaped chars in all the places they can go.
+    // These are not valid, but they should error later than JSON parsing.
+    let _: RegistryPackage<'_> = serde_json::from_str(
+        r#"{
+        "name":"This name has an escaped char in it \n\t\" ",
+        "vers":"0.0.1",
+        "deps":[{
+            "name": " \n\t\" ",
+            "req": " \n\t\" ",
+            "features": [" \n\t\" "],
+            "optional": true,
+            "default_features": true,
+            "target": " \n\t\" ",
+            "kind": " \n\t\" ",
+            "registry": " \n\t\" "
+        }],
+        "cksum":"bae3",
+        "features":{"test \n\t\" ":["k \n\t\" ","q \n\t\" "]},
+        "links":" \n\t\" "}"#,
+    )
+    .unwrap();
+}
+
+#[derive(Deserialize)]
+#[serde(field_identifier, rename_all = "lowercase")]
+enum Field {
+    Name,
+    Vers,
+    Deps,
+    Features,
+    Cksum,
+    Yanked,
+    Links,
+}
+
+#[derive(Deserialize)]
+struct RegistryDependency<'a> {
+    name: Cow<'a, str>,
+    req: Cow<'a, str>,
+    features: Vec<Cow<'a, str>>,
+    optional: bool,
+    default_features: bool,
+    target: Option<Cow<'a, str>>,
+    kind: Option<Cow<'a, str>>,
+    registry: Option<Cow<'a, str>>,
+    package: Option<Cow<'a, str>>,
+}
+
+impl<'a> RegistryDependency<'a> {
+    /// Converts an encoded dependency in the registry to a cargo dependency.
+    pub fn into_dep(self, default: SourceId) -> CargoResult<Dependency> {
+        let RegistryDependency {
+            name,
+            req,
+            mut features,
+            optional,
+            default_features,
+            target,
+            kind,
+            registry,
+            package,
+        } = self;
+
+        let id = if let Some(registry) = &registry {
+            SourceId::for_registry(&registry.to_url()?)?
+        } else {
+            default
+        };
+
+        let mut dep =
+            Dependency::parse_no_deprecated(package.as_ref().unwrap_or(&name), Some(&req), id)?;
+        if package.is_some() {
+            dep.set_explicit_name_in_toml(&name);
+        }
+        let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") {
+            "dev" => Kind::Development,
+            "build" => Kind::Build,
+            _ => Kind::Normal,
+        };
+
+        let platform = match target {
+            Some(target) => Some(target.parse()?),
+            None => None,
+        };
+
+        // Unfortunately older versions of cargo and/or the registry ended up
+        // publishing lots of entries where the features array contained the
+        // empty feature, "", inside. This confuses the resolution process much
+        // later on and these features aren't actually valid, so filter them all
+        // out here.
+        features.retain(|s| !s.is_empty());
+
+        // In the index, "registry" is null if it is from the same index.
+ // In Cargo.toml, "registry" is None if it is from the default + if !id.is_default_registry() { + dep.set_registry_id(id); + } + + dep.set_optional(optional) + .set_default_features(default_features) + .set_features(features) + .set_platform(platform) + .set_kind(kind); + + Ok(dep) + } +} + +pub trait RegistryData { + fn prepare(&self) -> CargoResult<()>; + fn index_path(&self) -> &Filesystem; + fn load( + &self, + _root: &Path, + path: &Path, + data: &mut dyn FnMut(&[u8]) -> CargoResult<()>, + ) -> CargoResult<()>; + fn config(&mut self) -> CargoResult>; + fn update_index(&mut self) -> CargoResult<()>; + fn download(&mut self, pkg: PackageId, checksum: &str) -> CargoResult; + fn finish_download( + &mut self, + pkg: PackageId, + checksum: &str, + data: &[u8], + ) -> CargoResult; + + fn is_crate_downloaded(&self, _pkg: PackageId) -> bool { + true + } +} + +pub enum MaybeLock { + Ready(FileLock), + Download { url: String, descriptor: String }, +} + +mod index; +mod local; +mod remote; + +fn short_name(id: SourceId) -> String { + let hash = hex::short_hash(&id); + let ident = id.url().host_str().unwrap_or("").to_string(); + format!("{}-{}", ident, hash) +} + +impl<'cfg> RegistrySource<'cfg> { + pub fn remote( + source_id: SourceId, + yanked_whitelist: &HashSet, + config: &'cfg Config, + ) -> RegistrySource<'cfg> { + let name = short_name(source_id); + let ops = remote::RemoteRegistry::new(source_id, config, &name); + RegistrySource::new( + source_id, + config, + &name, + Box::new(ops), + yanked_whitelist, + true, + ) + } + + pub fn local( + source_id: SourceId, + path: &Path, + yanked_whitelist: &HashSet, + config: &'cfg Config, + ) -> RegistrySource<'cfg> { + let name = short_name(source_id); + let ops = local::LocalRegistry::new(path, config, &name); + RegistrySource::new( + source_id, + config, + &name, + Box::new(ops), + yanked_whitelist, + false, + ) + } + + fn new( + source_id: SourceId, + config: &'cfg Config, + name: &str, + ops: Box, + yanked_whitelist: &HashSet, + index_locked: bool, + ) -> RegistrySource<'cfg> { + RegistrySource { + src_path: config.registry_source_path().join(name), + config, + source_id, + updated: false, + index: index::RegistryIndex::new(source_id, ops.index_path(), config, index_locked), + yanked_whitelist: yanked_whitelist.clone(), + index_locked, + ops, + } + } + + /// Decode the configuration stored within the registry. + /// + /// This requires that the index has been at least checked out. + pub fn config(&mut self) -> CargoResult> { + self.ops.config() + } + + /// Unpacks a downloaded package into a location where it's ready to be + /// compiled. + /// + /// No action is taken if the source looks like it's already unpacked. + fn unpack_package(&self, pkg: PackageId, tarball: &FileLock) -> CargoResult { + // The `.cargo-ok` file is used to track if the source is already + // unpacked and to lock the directory for unpacking. + let mut ok = { + let package_dir = format!("{}-{}", pkg.name(), pkg.version()); + let dst = self.src_path.join(&package_dir); + dst.create_dir()?; + + // Attempt to open a read-only copy first to avoid an exclusive write + // lock and also work with read-only filesystems. If the file has + // any data, assume the source is already unpacked. + if let Ok(ok) = dst.open_ro(PACKAGE_SOURCE_LOCK, self.config, &package_dir) { + let meta = ok.file().metadata()?; + if meta.len() > 0 { + let unpack_dir = ok.parent().to_path_buf(); + return Ok(unpack_dir); + } + } + + dst.open_rw(PACKAGE_SOURCE_LOCK, self.config, &package_dir)? 
+ }; + let unpack_dir = ok.parent().to_path_buf(); + + // If the file has any data, assume the source is already unpacked. + let meta = ok.file().metadata()?; + if meta.len() > 0 { + return Ok(unpack_dir); + } + + let gz = GzDecoder::new(tarball.file()); + let mut tar = Archive::new(gz); + let prefix = unpack_dir.file_name().unwrap(); + let parent = unpack_dir.parent().unwrap(); + for entry in tar.entries()? { + let mut entry = entry.chain_err(|| "failed to iterate over archive")?; + let entry_path = entry + .path() + .chain_err(|| "failed to read entry path")? + .into_owned(); + + // We're going to unpack this tarball into the global source + // directory, but we want to make sure that it doesn't accidentally + // (or maliciously) overwrite source code from other crates. Cargo + // itself should never generate a tarball that hits this error, and + // crates.io should also block uploads with these sorts of tarballs, + // but be extra sure by adding a check here as well. + if !entry_path.starts_with(prefix) { + failure::bail!( + "invalid tarball downloaded, contains \ + a file at {:?} which isn't under {:?}", + entry_path, + prefix + ) + } + + // Once that's verified, unpack the entry as usual. + entry + .unpack_in(parent) + .chain_err(|| format!("failed to unpack entry at `{}`", entry_path.display()))?; + } + + // Write to the lock file to indicate that unpacking was successful. + write!(ok, "ok")?; + + Ok(unpack_dir) + } + + fn do_update(&mut self) -> CargoResult<()> { + self.ops.update_index()?; + let path = self.ops.index_path(); + self.index = + index::RegistryIndex::new(self.source_id, path, self.config, self.index_locked); + Ok(()) + } + + fn get_pkg(&mut self, package: PackageId, path: &FileLock) -> CargoResult { + let path = self + .unpack_package(package, path) + .chain_err(|| internal(format!("failed to unpack package `{}`", package)))?; + let mut src = PathSource::new(&path, self.source_id, self.config); + src.update()?; + let pkg = match src.download(package)? { + MaybePackage::Ready(pkg) => pkg, + MaybePackage::Download { .. } => unreachable!(), + }; + Ok(pkg) + } +} + +impl<'cfg> Source for RegistrySource<'cfg> { + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + // If this is a precise dependency, then it came from a lock file and in + // theory the registry is known to contain this version. If, however, we + // come back with no summaries, then our registry may need to be + // updated, so we fall back to performing a lazy update. 
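+        //
+        // Sketch of the flow below: a locked ("precise") dependency first
+        // queries the index as-is, and only if nothing matched do we update
+        // the index and query again, so builds driven by a lock file can
+        // usually avoid touching the network at all.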
+ if dep.source_id().precise().is_some() && !self.updated { + debug!("attempting query without update"); + let mut called = false; + self.index + .query_inner(dep, &mut *self.ops, &self.yanked_whitelist, &mut |s| { + if dep.matches(&s) { + called = true; + f(s); + } + })?; + if called { + return Ok(()); + } else { + debug!("falling back to an update"); + self.do_update()?; + } + } + + self.index + .query_inner(dep, &mut *self.ops, &self.yanked_whitelist, &mut |s| { + if dep.matches(&s) { + f(s); + } + }) + } + + fn fuzzy_query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + self.index + .query_inner(dep, &mut *self.ops, &self.yanked_whitelist, f) + } + + fn supports_checksums(&self) -> bool { + true + } + + fn requires_precise(&self) -> bool { + false + } + + fn source_id(&self) -> SourceId { + self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + // If we have an imprecise version then we don't know what we're going + // to look for, so we always attempt to perform an update here. + // + // If we have a precise version, then we'll update lazily during the + // querying phase. Note that precise in this case is only + // `Some("locked")` as other `Some` values indicate a `cargo update + // --precise` request + if self.source_id.precise() != Some("locked") { + self.do_update()?; + } else { + debug!("skipping update due to locked registry"); + } + Ok(()) + } + + fn download(&mut self, package: PackageId) -> CargoResult { + let hash = self.index.hash(package, &mut *self.ops)?; + match self.ops.download(package, &hash)? { + MaybeLock::Ready(file) => self.get_pkg(package, &file).map(MaybePackage::Ready), + MaybeLock::Download { url, descriptor } => { + Ok(MaybePackage::Download { url, descriptor }) + } + } + } + + fn finish_download(&mut self, package: PackageId, data: Vec) -> CargoResult { + let hash = self.index.hash(package, &mut *self.ops)?; + let file = self.ops.finish_download(package, &hash, &data)?; + self.get_pkg(package, &file) + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + Ok(pkg.package_id().version().to_string()) + } + + fn describe(&self) -> String { + self.source_id.display_registry() + } + + fn add_to_yanked_whitelist(&mut self, pkgs: &[PackageId]) { + self.yanked_whitelist.extend(pkgs); + } +} diff --git a/src/cargo/sources/registry/remote.rs b/src/cargo/sources/registry/remote.rs new file mode 100644 index 000000000..dceb515c8 --- /dev/null +++ b/src/cargo/sources/registry/remote.rs @@ -0,0 +1,288 @@ +use std::cell::{Cell, Ref, RefCell}; +use std::fmt::Write as FmtWrite; +use std::io::prelude::*; +use std::io::SeekFrom; +use std::mem; +use std::path::Path; +use std::str; + +use lazycell::LazyCell; +use log::{debug, trace}; + +use crate::core::{PackageId, SourceId}; +use crate::sources::git; +use crate::sources::registry::MaybeLock; +use crate::sources::registry::{ + RegistryConfig, RegistryData, CRATE_TEMPLATE, INDEX_LOCK, VERSION_TEMPLATE, +}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{Config, Sha256}; +use crate::util::{FileLock, Filesystem}; + +pub struct RemoteRegistry<'cfg> { + index_path: Filesystem, + cache_path: Filesystem, + source_id: SourceId, + config: &'cfg Config, + tree: RefCell>>, + repo: LazyCell, + head: Cell>, +} + +impl<'cfg> RemoteRegistry<'cfg> { + pub fn new(source_id: SourceId, config: &'cfg Config, name: &str) -> RemoteRegistry<'cfg> { + RemoteRegistry { + index_path: config.registry_index_path().join(name), + cache_path: 
config.registry_cache_path().join(name),
+            source_id,
+            config,
+            tree: RefCell::new(None),
+            repo: LazyCell::new(),
+            head: Cell::new(None),
+        }
+    }
+
+    fn repo(&self) -> CargoResult<&git2::Repository> {
+        self.repo.try_borrow_with(|| {
+            let path = self.index_path.clone().into_path_unlocked();
+
+            // Fast path without a lock
+            if let Ok(repo) = git2::Repository::open(&path) {
+                trace!("opened a repo without a lock");
+                return Ok(repo);
+            }
+
+            // Ok, now we need to lock and try the whole thing over again.
+            trace!("acquiring registry index lock");
+            let lock = self.index_path.open_rw(
+                Path::new(INDEX_LOCK),
+                self.config,
+                "the registry index",
+            )?;
+            match git2::Repository::open(&path) {
+                Ok(repo) => Ok(repo),
+                Err(_) => {
+                    let _ = lock.remove_siblings();
+
+                    // Note that we'd actually prefer to use a bare repository
+                    // here as we're not actually going to check anything out.
+                    // All versions of Cargo, though, share the same CARGO_HOME,
+                    // so for compatibility with older Cargo which *does* do
+                    // checkouts we make sure to initialize a new full
+                    // repository (not a bare one).
+                    //
+                    // We should change this to `init_bare` whenever we feel
+                    // like enough time has passed or if we change the directory
+                    // that the folder is located in, such as by changing the
+                    // hash at the end of the directory.
+                    //
+                    // Note that in the meantime we also skip `init.templatedir`
+                    // as it can be misconfigured sometimes or otherwise add
+                    // things that we don't want.
+                    let mut opts = git2::RepositoryInitOptions::new();
+                    opts.external_template(false);
+                    Ok(git2::Repository::init_opts(&path, &opts)
+                        .chain_err(|| "failed to initialize index git repository")?)
+                }
+            }
+        })
+    }
+
+    fn head(&self) -> CargoResult<git2::Oid> {
+        if self.head.get().is_none() {
+            let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?;
+            self.head.set(Some(oid));
+        }
+        Ok(self.head.get().unwrap())
+    }
+
+    fn tree(&self) -> CargoResult<Ref<'_, git2::Tree<'static>>> {
+        {
+            let tree = self.tree.borrow();
+            if tree.is_some() {
+                return Ok(Ref::map(tree, |s| s.as_ref().unwrap()));
+            }
+        }
+        let repo = self.repo()?;
+        let commit = repo.find_commit(self.head()?)?;
+        let tree = commit.tree()?;
+
+        // Unfortunately in libgit2 the tree objects look like they've got a
+        // reference to the repository object, which means that a tree cannot
+        // outlive the repository that it came from. Here we want to cache this
+        // tree, though, so to accomplish this we transmute it to a static
+        // lifetime.
+        //
+        // Note that we don't actually hand out the static lifetime; instead we
+        // only return a scoped one from this function. Additionally the repo
+        // we loaded from (above) lives as long as this object
+        // (`RemoteRegistry`), so we then just need to ensure that the tree is
+        // destroyed first in the destructor, hence the destructor on
+        // `RemoteRegistry` below.
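+        //
+        // In other words, the `'static` lifetime never escapes this module:
+        // callers of `tree()` only ever see a `Ref` borrowed from `self`, and
+        // the `Drop` impl below clears this cache before the repository that
+        // backs it is destroyed.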
+        let tree = unsafe { mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) };
+        *self.tree.borrow_mut() = Some(tree);
+        Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap()))
+    }
+
+    fn filename(&self, pkg: PackageId) -> String {
+        format!("{}-{}.crate", pkg.name(), pkg.version())
+    }
+}
+
+impl<'cfg> RegistryData for RemoteRegistry<'cfg> {
+    fn prepare(&self) -> CargoResult<()> {
+        self.repo()?; // create intermediate dirs and initialize the repo
+        Ok(())
+    }
+
+    fn index_path(&self) -> &Filesystem {
+        &self.index_path
+    }
+
+    fn load(
+        &self,
+        _root: &Path,
+        path: &Path,
+        data: &mut dyn FnMut(&[u8]) -> CargoResult<()>,
+    ) -> CargoResult<()> {
+        // Note that the index calls this method and the filesystem is locked
+        // in the index, so we don't need to worry about an `update_index`
+        // happening in a different process.
+        let repo = self.repo()?;
+        let tree = self.tree()?;
+        let entry = tree.get_path(path)?;
+        let object = entry.to_object(repo)?;
+        let blob = match object.as_blob() {
+            Some(blob) => blob,
+            None => failure::bail!("path `{}` is not a blob in the git repo", path.display()),
+        };
+        data(blob.content())
+    }
+
+    fn config(&mut self) -> CargoResult<Option<RegistryConfig>> {
+        debug!("loading config");
+        self.prepare()?;
+        let _lock =
+            self.index_path
+                .open_ro(Path::new(INDEX_LOCK), self.config, "the registry index")?;
+        let mut config = None;
+        self.load(Path::new(""), Path::new("config.json"), &mut |json| {
+            config = Some(serde_json::from_slice(json)?);
+            Ok(())
+        })?;
+        trace!("config loaded");
+        Ok(config)
+    }
+
+    fn update_index(&mut self) -> CargoResult<()> {
+        if self.config.cli_unstable().offline {
+            return Ok(());
+        }
+        if self.config.cli_unstable().no_index_update {
+            return Ok(());
+        }
+
+        debug!("updating the index");
+
+        // Ensure that we'll actually be able to acquire an HTTP handle later on
+        // once we start trying to download crates. This will weed out any
+        // problems with `.cargo/config` configuration related to HTTP.
+        //
+        // This way if there's a problem the error gets printed before we even
+        // hit the index, which may not actually read this configuration.
+        self.config.http()?;
+
+        self.prepare()?;
+        self.head.set(None);
+        *self.tree.borrow_mut() = None;
+        let _lock =
+            self.index_path
+                .open_rw(Path::new(INDEX_LOCK), self.config, "the registry index")?;
+        self.config
+            .shell()
+            .status("Updating", self.source_id.display_registry())?;
+
+        // git fetch origin master
+        let url = self.source_id.url();
+        let refspec = "refs/heads/master:refs/remotes/origin/master";
+        let repo = self.repo.borrow_mut().unwrap();
+        git::fetch(repo, url, refspec, self.config)
+            .chain_err(|| format!("failed to fetch `{}`", url))?;
+        Ok(())
+    }
+
+    fn download(&mut self, pkg: PackageId, _checksum: &str) -> CargoResult<MaybeLock> {
+        let filename = self.filename(pkg);
+
+        // Attempt to open a read-only copy first to avoid an exclusive write
+        // lock and also work with read-only filesystems. Note that we check the
+        // length of the file below to handle interrupted downloads.
+        //
+        // If this fails then we fall through to the exclusive path where we may
+        // have to redownload the file.
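+        //
+        // (A zero-length file can be left behind if a previous download was
+        // interrupted, so an empty cache entry is treated as "not downloaded
+        // yet" rather than as a valid crate file.)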
+ if let Ok(dst) = self.cache_path.open_ro(&filename, self.config, &filename) { + let meta = dst.file().metadata()?; + if meta.len() > 0 { + return Ok(MaybeLock::Ready(dst)); + } + } + + let config = self.config()?.unwrap(); + let mut url = config.dl; + if !url.contains(CRATE_TEMPLATE) && !url.contains(VERSION_TEMPLATE) { + write!(url, "/{}/{}/download", CRATE_TEMPLATE, VERSION_TEMPLATE).unwrap(); + } + let url = url + .replace(CRATE_TEMPLATE, &*pkg.name()) + .replace(VERSION_TEMPLATE, &pkg.version().to_string()); + + Ok(MaybeLock::Download { + url, + descriptor: pkg.to_string(), + }) + } + + fn finish_download( + &mut self, + pkg: PackageId, + checksum: &str, + data: &[u8], + ) -> CargoResult { + // Verify what we just downloaded + let mut state = Sha256::new(); + state.update(data); + if hex::encode(state.finish()) != checksum { + failure::bail!("failed to verify the checksum of `{}`", pkg) + } + + let filename = self.filename(pkg); + let mut dst = self.cache_path.open_rw(&filename, self.config, &filename)?; + let meta = dst.file().metadata()?; + if meta.len() > 0 { + return Ok(dst); + } + + dst.write_all(data)?; + dst.seek(SeekFrom::Start(0))?; + Ok(dst) + } + + fn is_crate_downloaded(&self, pkg: PackageId) -> bool { + let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); + let path = Path::new(&filename); + + if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) { + if let Ok(meta) = dst.file().metadata() { + return meta.len() > 0; + } + } + false + } +} + +impl<'cfg> Drop for RemoteRegistry<'cfg> { + fn drop(&mut self) { + // Just be sure to drop this before our other fields + self.tree.borrow_mut().take(); + } +} diff --git a/src/cargo/sources/replaced.rs b/src/cargo/sources/replaced.rs new file mode 100644 index 000000000..465b0f08d --- /dev/null +++ b/src/cargo/sources/replaced.rs @@ -0,0 +1,122 @@ +use crate::core::source::MaybePackage; +use crate::core::{Dependency, Package, PackageId, Source, SourceId, Summary}; +use crate::util::errors::{CargoResult, CargoResultExt}; + +pub struct ReplacedSource<'cfg> { + to_replace: SourceId, + replace_with: SourceId, + inner: Box, +} + +impl<'cfg> ReplacedSource<'cfg> { + pub fn new( + to_replace: SourceId, + replace_with: SourceId, + src: Box, + ) -> ReplacedSource<'cfg> { + ReplacedSource { + to_replace, + replace_with, + inner: src, + } + } +} + +impl<'cfg> Source for ReplacedSource<'cfg> { + fn source_id(&self) -> SourceId { + self.to_replace + } + + fn replaced_source_id(&self) -> SourceId { + self.replace_with + } + + fn supports_checksums(&self) -> bool { + self.inner.supports_checksums() + } + + fn requires_precise(&self) -> bool { + self.inner.requires_precise() + } + + fn query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + let (replace_with, to_replace) = (self.replace_with, self.to_replace); + let dep = dep.clone().map_source(to_replace, replace_with); + + self.inner + .query(&dep, &mut |summary| { + f(summary.map_source(replace_with, to_replace)) + }) + .chain_err(|| format!("failed to query replaced source {}", self.to_replace))?; + Ok(()) + } + + fn fuzzy_query(&mut self, dep: &Dependency, f: &mut dyn FnMut(Summary)) -> CargoResult<()> { + let (replace_with, to_replace) = (self.replace_with, self.to_replace); + let dep = dep.clone().map_source(to_replace, replace_with); + + self.inner + .fuzzy_query(&dep, &mut |summary| { + f(summary.map_source(replace_with, to_replace)) + }) + .chain_err(|| format!("failed to query replaced source {}", self.to_replace))?; + Ok(()) + 
} + + fn update(&mut self) -> CargoResult<()> { + self.inner + .update() + .chain_err(|| format!("failed to update replaced source {}", self.to_replace))?; + Ok(()) + } + + fn download(&mut self, id: PackageId) -> CargoResult { + let id = id.with_source_id(self.replace_with); + let pkg = self + .inner + .download(id) + .chain_err(|| format!("failed to download replaced source {}", self.to_replace))?; + Ok(match pkg { + MaybePackage::Ready(pkg) => { + MaybePackage::Ready(pkg.map_source(self.replace_with, self.to_replace)) + } + other @ MaybePackage::Download { .. } => other, + }) + } + + fn finish_download(&mut self, id: PackageId, data: Vec) -> CargoResult { + let id = id.with_source_id(self.replace_with); + let pkg = self + .inner + .finish_download(id, data) + .chain_err(|| format!("failed to download replaced source {}", self.to_replace))?; + Ok(pkg.map_source(self.replace_with, self.to_replace)) + } + + fn fingerprint(&self, id: &Package) -> CargoResult { + self.inner.fingerprint(id) + } + + fn verify(&self, id: PackageId) -> CargoResult<()> { + let id = id.with_source_id(self.replace_with); + self.inner.verify(id) + } + + fn describe(&self) -> String { + format!( + "{} (which is replacing {})", + self.inner.describe(), + self.to_replace + ) + } + + fn is_replaced(&self) -> bool { + true + } + + fn add_to_yanked_whitelist(&mut self, pkgs: &[PackageId]) { + let pkgs = pkgs.iter().map(|id| id.with_source_id(self.replace_with)) + .collect::>(); + self.inner.add_to_yanked_whitelist(&pkgs); + } +} diff --git a/src/cargo/util/cfg.rs b/src/cargo/util/cfg.rs new file mode 100644 index 000000000..4c4ad232d --- /dev/null +++ b/src/cargo/util/cfg.rs @@ -0,0 +1,278 @@ +use std::fmt; +use std::iter; +use std::str::{self, FromStr}; + +use crate::util::CargoResult; + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum Cfg { + Name(String), + KeyPair(String, String), +} + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum CfgExpr { + Not(Box), + All(Vec), + Any(Vec), + Value(Cfg), +} + +#[derive(PartialEq)] +enum Token<'a> { + LeftParen, + RightParen, + Ident(&'a str), + Comma, + Equals, + String(&'a str), +} + +struct Tokenizer<'a> { + s: iter::Peekable>, + orig: &'a str, +} + +struct Parser<'a> { + t: iter::Peekable>, +} + +impl FromStr for Cfg { + type Err = failure::Error; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.cfg()?; + if p.t.next().is_some() { + failure::bail!("malformed cfg value or key/value pair: `{}`", s) + } + Ok(e) + } +} + +impl fmt::Display for Cfg { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Cfg::Name(ref s) => s.fmt(f), + Cfg::KeyPair(ref k, ref v) => write!(f, "{} = \"{}\"", k, v), + } + } +} + +impl CfgExpr { + /// Utility function to check if the key, "cfg(..)" matches the `target_cfg` + pub fn matches_key(key: &str, target_cfg: &[Cfg]) -> bool { + if key.starts_with("cfg(") && key.ends_with(')') { + let cfg = &key[4..key.len() - 1]; + + CfgExpr::from_str(cfg) + .ok() + .map(|ce| ce.matches(target_cfg)) + .unwrap_or(false) + } else { + false + } + } + + pub fn matches(&self, cfg: &[Cfg]) -> bool { + match *self { + CfgExpr::Not(ref e) => !e.matches(cfg), + CfgExpr::All(ref e) => e.iter().all(|e| e.matches(cfg)), + CfgExpr::Any(ref e) => e.iter().any(|e| e.matches(cfg)), + CfgExpr::Value(ref e) => cfg.contains(e), + } + } +} + +impl FromStr for CfgExpr { + type Err = failure::Error; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e 
= p.expr()?; + if p.t.next().is_some() { + failure::bail!( + "can only have one cfg-expression, consider using all() or \ + any() explicitly" + ) + } + Ok(e) + } +} + +impl fmt::Display for CfgExpr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + CfgExpr::Not(ref e) => write!(f, "not({})", e), + CfgExpr::All(ref e) => write!(f, "all({})", CommaSep(e)), + CfgExpr::Any(ref e) => write!(f, "any({})", CommaSep(e)), + CfgExpr::Value(ref e) => write!(f, "{}", e), + } + } +} + +struct CommaSep<'a, T>(&'a [T]); + +impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for (i, v) in self.0.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", v)?; + } + Ok(()) + } +} + +impl<'a> Parser<'a> { + fn new(s: &'a str) -> Parser<'a> { + Parser { + t: Tokenizer { + s: s.char_indices().peekable(), + orig: s, + } + .peekable(), + } + } + + fn expr(&mut self) -> CargoResult { + match self.t.peek() { + Some(&Ok(Token::Ident(op @ "all"))) | Some(&Ok(Token::Ident(op @ "any"))) => { + self.t.next(); + let mut e = Vec::new(); + self.eat(&Token::LeftParen)?; + while !self.r#try(&Token::RightParen) { + e.push(self.expr()?); + if !self.r#try(&Token::Comma) { + self.eat(&Token::RightParen)?; + break; + } + } + if op == "all" { + Ok(CfgExpr::All(e)) + } else { + Ok(CfgExpr::Any(e)) + } + } + Some(&Ok(Token::Ident("not"))) => { + self.t.next(); + self.eat(&Token::LeftParen)?; + let e = self.expr()?; + self.eat(&Token::RightParen)?; + Ok(CfgExpr::Not(Box::new(e))) + } + Some(&Ok(..)) => self.cfg().map(CfgExpr::Value), + Some(&Err(..)) => Err(self.t.next().unwrap().err().unwrap()), + None => failure::bail!( + "expected start of a cfg expression, \ + found nothing" + ), + } + } + + fn cfg(&mut self) -> CargoResult { + match self.t.next() { + Some(Ok(Token::Ident(name))) => { + let e = if self.r#try(&Token::Equals) { + let val = match self.t.next() { + Some(Ok(Token::String(s))) => s, + Some(Ok(t)) => failure::bail!("expected a string, found {}", t.classify()), + Some(Err(e)) => return Err(e), + None => failure::bail!("expected a string, found nothing"), + }; + Cfg::KeyPair(name.to_string(), val.to_string()) + } else { + Cfg::Name(name.to_string()) + }; + Ok(e) + } + Some(Ok(t)) => failure::bail!("expected identifier, found {}", t.classify()), + Some(Err(e)) => Err(e), + None => failure::bail!("expected identifier, found nothing"), + } + } + + fn r#try(&mut self, token: &Token<'a>) -> bool { + match self.t.peek() { + Some(&Ok(ref t)) if token == t => {} + _ => return false, + } + self.t.next(); + true + } + + fn eat(&mut self, token: &Token<'a>) -> CargoResult<()> { + match self.t.next() { + Some(Ok(ref t)) if token == t => Ok(()), + Some(Ok(t)) => failure::bail!("expected {}, found {}", token.classify(), t.classify()), + Some(Err(e)) => Err(e), + None => failure::bail!("expected {}, but cfg expr ended", token.classify()), + } + } +} + +impl<'a> Iterator for Tokenizer<'a> { + type Item = CargoResult>; + + fn next(&mut self) -> Option>> { + loop { + match self.s.next() { + Some((_, ' ')) => {} + Some((_, '(')) => return Some(Ok(Token::LeftParen)), + Some((_, ')')) => return Some(Ok(Token::RightParen)), + Some((_, ',')) => return Some(Ok(Token::Comma)), + Some((_, '=')) => return Some(Ok(Token::Equals)), + Some((start, '"')) => { + while let Some((end, ch)) = self.s.next() { + if ch == '"' { + return Some(Ok(Token::String(&self.orig[start + 1..end]))); + } + } + return 
Some(Err(failure::format_err!("unterminated string in cfg"))); + } + Some((start, ch)) if is_ident_start(ch) => { + while let Some(&(end, ch)) = self.s.peek() { + if !is_ident_rest(ch) { + return Some(Ok(Token::Ident(&self.orig[start..end]))); + } else { + self.s.next(); + } + } + return Some(Ok(Token::Ident(&self.orig[start..]))); + } + Some((_, ch)) => { + return Some(Err(failure::format_err!( + "unexpected character in \ + cfg `{}`, expected parens, \ + a comma, an identifier, or \ + a string", + ch + ))); + } + None => return None, + } + } + } +} + +fn is_ident_start(ch: char) -> bool { + ch == '_' || ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') +} + +fn is_ident_rest(ch: char) -> bool { + is_ident_start(ch) || ('0' <= ch && ch <= '9') +} + +impl<'a> Token<'a> { + fn classify(&self) -> &str { + match *self { + Token::LeftParen => "`(`", + Token::RightParen => "`)`", + Token::Ident(..) => "an identifier", + Token::Comma => "`,`", + Token::Equals => "`=`", + Token::String(..) => "a string", + } + } +} diff --git a/src/cargo/util/command_prelude.rs b/src/cargo/util/command_prelude.rs new file mode 100644 index 000000000..770a2fd2a --- /dev/null +++ b/src/cargo/util/command_prelude.rs @@ -0,0 +1,503 @@ +use std::fs; +use std::path::PathBuf; + +use crate::core::compiler::{BuildConfig, MessageFormat}; +use crate::core::Workspace; +use crate::ops::{CompileFilter, CompileOptions, NewOptions, Packages, VersionControl}; +use crate::sources::CRATES_IO_REGISTRY; +use crate::util::important_paths::find_root_manifest_for_wd; +use crate::util::{paths, validate_package_name}; +use crate::util::{ + print_available_benches, print_available_binaries, print_available_examples, + print_available_tests, +}; +use crate::CargoResult; +use clap::{self, SubCommand}; + +pub use crate::core::compiler::CompileMode; +pub use crate::{CliError, CliResult, Config}; +pub use clap::{AppSettings, Arg, ArgMatches}; + +pub type App = clap::App<'static, 'static>; + +pub trait AppExt: Sized { + fn _arg(self, arg: Arg<'static, 'static>) -> Self; + + fn arg_package_spec( + self, + package: &'static str, + all: &'static str, + exclude: &'static str, + ) -> Self { + self.arg_package_spec_simple(package) + ._arg(opt("all", all)) + ._arg(multi_opt("exclude", "SPEC", exclude)) + } + + fn arg_package_spec_simple(self, package: &'static str) -> Self { + self._arg(multi_opt("package", "SPEC", package).short("p")) + } + + fn arg_package(self, package: &'static str) -> Self { + self._arg(opt("package", package).short("p").value_name("SPEC")) + } + + fn arg_jobs(self) -> Self { + self._arg( + opt("jobs", "Number of parallel jobs, defaults to # of CPUs") + .short("j") + .value_name("N"), + ) + } + + fn arg_targets_all( + self, + lib: &'static str, + bin: &'static str, + bins: &'static str, + example: &'static str, + examples: &'static str, + test: &'static str, + tests: &'static str, + bench: &'static str, + benches: &'static str, + all: &'static str, + ) -> Self { + self.arg_targets_lib_bin(lib, bin, bins) + ._arg(optional_multi_opt("example", "NAME", example)) + ._arg(opt("examples", examples)) + ._arg(optional_multi_opt("test", "NAME", test)) + ._arg(opt("tests", tests)) + ._arg(optional_multi_opt("bench", "NAME", bench)) + ._arg(opt("benches", benches)) + ._arg(opt("all-targets", all)) + } + + fn arg_targets_lib_bin(self, lib: &'static str, bin: &'static str, bins: &'static str) -> Self { + self._arg(opt("lib", lib)) + ._arg(optional_multi_opt("bin", "NAME", bin)) + ._arg(opt("bins", bins)) + } + + fn 
arg_targets_bins_examples( + self, + bin: &'static str, + bins: &'static str, + example: &'static str, + examples: &'static str, + ) -> Self { + self._arg(optional_multi_opt("bin", "NAME", bin)) + ._arg(opt("bins", bins)) + ._arg(optional_multi_opt("example", "NAME", example)) + ._arg(opt("examples", examples)) + } + + fn arg_targets_bin_example(self, bin: &'static str, example: &'static str) -> Self { + self._arg(optional_multi_opt("bin", "NAME", bin)) + ._arg(optional_multi_opt("example", "NAME", example)) + } + + fn arg_features(self) -> Self { + self._arg( + opt("features", "Space-separated list of features to activate").value_name("FEATURES"), + ) + ._arg(opt("all-features", "Activate all available features")) + ._arg(opt( + "no-default-features", + "Do not activate the `default` feature", + )) + } + + fn arg_release(self, release: &'static str) -> Self { + self._arg(opt("release", release)) + } + + fn arg_doc(self, doc: &'static str) -> Self { + self._arg(opt("doc", doc)) + } + + fn arg_target_triple(self, target: &'static str) -> Self { + self._arg(opt("target", target).value_name("TRIPLE")) + } + + fn arg_target_dir(self) -> Self { + self._arg( + opt("target-dir", "Directory for all generated artifacts").value_name("DIRECTORY"), + ) + } + + fn arg_manifest_path(self) -> Self { + self._arg(opt("manifest-path", "Path to Cargo.toml").value_name("PATH")) + } + + fn arg_message_format(self) -> Self { + self._arg( + opt("message-format", "Error format") + .value_name("FMT") + .case_insensitive(true) + .possible_values(&["human", "json", "short"]) + .default_value("human"), + ) + } + + fn arg_build_plan(self) -> Self { + self._arg(opt("build-plan", "Output the build plan in JSON")) + } + + fn arg_new_opts(self) -> Self { + self._arg( + opt( + "vcs", + "\ + Initialize a new repository for the given version \ + control system (git, hg, pijul, or fossil) or do not \ + initialize any version control at all (none), overriding \ + a global configuration.", + ) + .value_name("VCS") + .possible_values(&["git", "hg", "pijul", "fossil", "none"]), + ) + ._arg(opt("bin", "Use a binary (application) template [default]")) + ._arg(opt("lib", "Use a library template")) + ._arg( + opt("edition", "Edition to set for the crate generated") + .possible_values(&["2015", "2018"]) + .value_name("YEAR"), + ) + ._arg( + opt( + "name", + "Set the resulting package name, defaults to the directory name", + ) + .value_name("NAME"), + ) + } + + fn arg_index(self) -> Self { + self._arg(opt("index", "Registry index URL to upload the package to").value_name("INDEX")) + ._arg( + opt("host", "DEPRECATED, renamed to '--index'") + .value_name("HOST") + .hidden(true), + ) + } + + fn arg_dry_run(self, dry_run: &'static str) -> Self { + self._arg(opt("dry-run", dry_run)) + } +} + +impl AppExt for App { + fn _arg(self, arg: Arg<'static, 'static>) -> Self { + self.arg(arg) + } +} + +pub fn opt(name: &'static str, help: &'static str) -> Arg<'static, 'static> { + Arg::with_name(name).long(name).help(help) +} + +pub fn optional_multi_opt( + name: &'static str, + value_name: &'static str, + help: &'static str, +) -> Arg<'static, 'static> { + opt(name, help) + .value_name(value_name) + .multiple(true) + .min_values(0) + .number_of_values(1) +} + +pub fn multi_opt( + name: &'static str, + value_name: &'static str, + help: &'static str, +) -> Arg<'static, 'static> { + // Note that all `.multiple(true)` arguments in Cargo should specify + // `.number_of_values(1)` as well, so that `--foo val1 val2` is + // *not* parsed as `foo` with 
values ["val1", "val2"]. + // `number_of_values` should become the default in clap 3. + opt(name, help) + .value_name(value_name) + .multiple(true) + .number_of_values(1) +} + +pub fn subcommand(name: &'static str) -> App { + SubCommand::with_name(name).settings(&[ + AppSettings::UnifiedHelpMessage, + AppSettings::DeriveDisplayOrder, + AppSettings::DontCollapseArgsInUsage, + ]) +} + +pub trait ArgMatchesExt { + fn value_of_u32(&self, name: &str) -> CargoResult> { + let arg = match self._value_of(name) { + None => None, + Some(arg) => Some(arg.parse::().map_err(|_| { + clap::Error::value_validation_auto(format!("could not parse `{}` as a number", arg)) + })?), + }; + Ok(arg) + } + + /// Returns value of the `name` command-line argument as an absolute path + fn value_of_path(&self, name: &str, config: &Config) -> Option { + self._value_of(name).map(|path| config.cwd().join(path)) + } + + fn root_manifest(&self, config: &Config) -> CargoResult { + if let Some(path) = self.value_of_path("manifest-path", config) { + // In general, we try to avoid normalizing paths in Cargo, + // but in this particular case we need it to fix #3586. + let path = paths::normalize_path(&path); + if !path.ends_with("Cargo.toml") { + failure::bail!("the manifest-path must be a path to a Cargo.toml file") + } + if fs::metadata(&path).is_err() { + failure::bail!( + "manifest path `{}` does not exist", + self._value_of("manifest-path").unwrap() + ) + } + return Ok(path); + } + find_root_manifest_for_wd(config.cwd()) + } + + fn workspace<'a>(&self, config: &'a Config) -> CargoResult> { + let root = self.root_manifest(config)?; + let mut ws = Workspace::new(&root, config)?; + if config.cli_unstable().avoid_dev_deps { + ws.set_require_optional_deps(false); + } + Ok(ws) + } + + fn jobs(&self) -> CargoResult> { + self.value_of_u32("jobs") + } + + fn target(&self) -> Option { + self._value_of("target").map(|s| s.to_string()) + } + + fn compile_options<'a>( + &self, + config: &'a Config, + mode: CompileMode, + workspace: Option<&Workspace<'a>>, + ) -> CargoResult> { + let spec = Packages::from_flags( + self._is_present("all"), + self._values_of("exclude"), + self._values_of("package"), + )?; + + let message_format = match self._value_of("message-format") { + None => MessageFormat::Human, + Some(f) => { + if f.eq_ignore_ascii_case("json") { + MessageFormat::Json + } else if f.eq_ignore_ascii_case("human") { + MessageFormat::Human + } else if f.eq_ignore_ascii_case("short") { + MessageFormat::Short + } else { + panic!("Impossible message format: {:?}", f) + } + } + }; + + let mut build_config = BuildConfig::new(config, self.jobs()?, &self.target(), mode)?; + build_config.message_format = message_format; + build_config.release = self._is_present("release"); + build_config.build_plan = self._is_present("build-plan"); + if build_config.build_plan && !config.cli_unstable().unstable_options { + Err(failure::format_err!( + "`--build-plan` flag is unstable, pass `-Z unstable-options` to enable it" + ))?; + }; + + let opts = CompileOptions { + config, + build_config, + features: self._values_of("features"), + all_features: self._is_present("all-features"), + no_default_features: self._is_present("no-default-features"), + spec, + filter: CompileFilter::new( + self._is_present("lib"), + self._values_of("bin"), + self._is_present("bins"), + self._values_of("test"), + self._is_present("tests"), + self._values_of("example"), + self._is_present("examples"), + self._values_of("bench"), + self._is_present("benches"), + 
self._is_present("all-targets"), + ), + target_rustdoc_args: None, + target_rustc_args: None, + local_rustdoc_args: None, + export_dir: None, + }; + + if let Some(ws) = workspace { + self.check_optional_opts(ws, &opts)?; + } + + Ok(opts) + } + + fn compile_options_for_single_package<'a>( + &self, + config: &'a Config, + mode: CompileMode, + workspace: Option<&Workspace<'a>>, + ) -> CargoResult> { + let mut compile_opts = self.compile_options(config, mode, workspace)?; + compile_opts.spec = Packages::Packages(self._values_of("package")); + Ok(compile_opts) + } + + fn new_options(&self, config: &Config) -> CargoResult { + let vcs = self._value_of("vcs").map(|vcs| match vcs { + "git" => VersionControl::Git, + "hg" => VersionControl::Hg, + "pijul" => VersionControl::Pijul, + "fossil" => VersionControl::Fossil, + "none" => VersionControl::NoVcs, + vcs => panic!("Impossible vcs: {:?}", vcs), + }); + NewOptions::new( + vcs, + self._is_present("bin"), + self._is_present("lib"), + self.value_of_path("path", config).unwrap(), + self._value_of("name").map(|s| s.to_string()), + self._value_of("edition").map(|s| s.to_string()), + self.registry(config)?, + ) + } + + fn registry(&self, config: &Config) -> CargoResult> { + match self._value_of("registry") { + Some(registry) => { + validate_package_name(registry, "registry name", "")?; + + if registry == CRATES_IO_REGISTRY { + // If "crates.io" is specified, then we just need to return `None`, + // as that will cause cargo to use crates.io. This is required + // for the case where a default alternative registry is used + // but the user wants to switch back to crates.io for a single + // command. + Ok(None) + } else { + Ok(Some(registry.to_string())) + } + } + None => config.default_registry(), + } + } + + fn index(&self, config: &Config) -> CargoResult> { + // TODO: deprecated. Remove once it has been decided `--host` can be removed + // We may instead want to repurpose the host flag, as mentioned in issue + // rust-lang/cargo#4208. + let msg = "The flag '--host' is no longer valid. + +Previous versions of Cargo accepted this flag, but it is being +deprecated. The flag is being renamed to 'index', as the flag +wants the location of the index. Please use '--index' instead. 
+ +This will soon become a hard error, so it's either recommended +to update to a fixed version or contact the upstream maintainer +about this warning."; + + let index = match self._value_of("host") { + Some(host) => { + config.shell().warn(&msg)?; + Some(host.to_string()) + } + None => self._value_of("index").map(|s| s.to_string()), + }; + Ok(index) + } + + fn check_optional_opts( + &self, + workspace: &Workspace<'_>, + compile_opts: &CompileOptions<'_>, + ) -> CargoResult<()> { + if self.is_present_with_zero_values("example") { + print_available_examples(&workspace, &compile_opts)?; + } + + if self.is_present_with_zero_values("bin") { + print_available_binaries(&workspace, &compile_opts)?; + } + + if self.is_present_with_zero_values("bench") { + print_available_benches(&workspace, &compile_opts)?; + } + + if self.is_present_with_zero_values("test") { + print_available_tests(&workspace, &compile_opts)?; + } + + Ok(()) + } + + fn is_present_with_zero_values(&self, name: &str) -> bool { + self._is_present(name) && self._value_of(name).is_none() + } + + fn _value_of(&self, name: &str) -> Option<&str>; + + fn _values_of(&self, name: &str) -> Vec; + + fn _is_present(&self, name: &str) -> bool; +} + +impl<'a> ArgMatchesExt for ArgMatches<'a> { + fn _value_of(&self, name: &str) -> Option<&str> { + self.value_of(name) + } + + fn _values_of(&self, name: &str) -> Vec { + self.values_of(name) + .unwrap_or_default() + .map(|s| s.to_string()) + .collect() + } + + fn _is_present(&self, name: &str) -> bool { + self.is_present(name) + } +} + +pub fn values(args: &ArgMatches<'_>, name: &str) -> Vec { + args.values_of(name) + .unwrap_or_default() + .map(|s| s.to_string()) + .collect() +} + +#[derive(PartialEq, PartialOrd, Eq, Ord)] +pub enum CommandInfo { + BuiltIn { name: String, about: Option }, + External { name: String, path: PathBuf }, +} + +impl CommandInfo { + pub fn name(&self) -> String { + match self { + CommandInfo::BuiltIn { name, .. } => name.to_string(), + CommandInfo::External { name, .. } => name.to_string(), + } + } +} diff --git a/src/cargo/util/config.rs b/src/cargo/util/config.rs new file mode 100644 index 000000000..3f96618c7 --- /dev/null +++ b/src/cargo/util/config.rs @@ -0,0 +1,1647 @@ +use std::cell::{RefCell, RefMut}; +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::hash_map::HashMap; +use std::collections::HashSet; +use std::env; +use std::fmt; +use std::fs::{self, File}; +use std::io::prelude::*; +use std::io::SeekFrom; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::{Once, ONCE_INIT}; +use std::time::Instant; +use std::vec; + +use curl::easy::Easy; +use lazycell::LazyCell; +use serde::Deserialize; +use serde::{de, de::IntoDeserializer}; +use url::Url; + +use crate::core::profiles::ConfigProfiles; +use crate::core::shell::Verbosity; +use crate::core::{CliUnstable, Shell, SourceId, Workspace}; +use crate::ops; +use crate::util::errors::{internal, CargoResult, CargoResultExt}; +use crate::util::toml as cargo_toml; +use crate::util::Filesystem; +use crate::util::Rustc; +use crate::util::ToUrl; +use crate::util::{paths, validate_package_name}; +use self::ConfigValue as CV; + +/// Configuration information for cargo. This is not specific to a build, it is information +/// relating to cargo itself. +/// +/// This struct implements `Default`: all fields can be inferred. +#[derive(Debug)] +pub struct Config { + /// The location of the user's 'home' directory. OS-dependent. 
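+    /// (Usually `$HOME/.cargo`; the `CARGO_HOME` environment variable, when
+    /// set, overrides this location.)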
home_path: Filesystem,
+    /// Information about how to write messages to the shell
+    shell: RefCell<Shell>,
+    /// A collection of configuration options
+    values: LazyCell<HashMap<String, ConfigValue>>,
+    /// The current working directory of cargo
+    cwd: PathBuf,
+    /// The location of the cargo executable (path to current process)
+    cargo_exe: LazyCell<PathBuf>,
+    /// The location of the rustdoc executable
+    rustdoc: LazyCell<PathBuf>,
+    /// Whether we are printing extra verbose messages
+    extra_verbose: bool,
+    /// `frozen` is set if we shouldn't access the network
+    frozen: bool,
+    /// `locked` is set if we should not update lock files
+    locked: bool,
+    /// A global static IPC control mechanism (used for managing parallel builds)
+    jobserver: Option<jobserver::Client>,
+    /// CLI flags of the form "-Z something"
+    cli_flags: CliUnstable,
+    /// A handle on curl easy mode for http calls
+    easy: LazyCell<RefCell<Easy>>,
+    /// Cache of the `SourceId` for crates.io
+    crates_io_source_id: LazyCell<SourceId>,
+    /// If false, don't cache `rustc --version --verbose` invocations
+    cache_rustc_info: bool,
+    /// Creation time of this config, used to output the total build time
+    creation_time: Instant,
+    /// Target directory via resolved CLI parameter
+    target_dir: Option<Filesystem>,
+    /// Environment variables, separated to assist testing.
+    env: HashMap<String, String>,
+    /// Profiles loaded from config.
+    profiles: LazyCell<ConfigProfiles>,
+}
+
+impl Config {
+    pub fn new(shell: Shell, cwd: PathBuf, homedir: PathBuf) -> Config {
+        static mut GLOBAL_JOBSERVER: *mut jobserver::Client = 0 as *mut _;
+        static INIT: Once = ONCE_INIT;
+
+        // This should be called early on in the process, so in theory the
+        // unsafety is ok here. (We've taken ownership of random fds.)
+        INIT.call_once(|| unsafe {
+            if let Some(client) = jobserver::Client::from_env() {
+                GLOBAL_JOBSERVER = Box::into_raw(Box::new(client));
+            }
+        });
+
+        let env: HashMap<_, _> = env::vars_os()
+            .filter_map(|(k, v)| {
+                // Ignore any key/values that are not valid Unicode.
+                match (k.into_string(), v.into_string()) {
+                    (Ok(k), Ok(v)) => Some((k, v)),
+                    _ => None,
+                }
+            })
+            .collect();
+
+        let cache_rustc_info = match env.get("CARGO_CACHE_RUSTC_INFO") {
+            Some(cache) => cache != "0",
+            _ => true,
+        };
+
+        Config {
+            home_path: Filesystem::new(homedir),
+            shell: RefCell::new(shell),
+            cwd,
+            values: LazyCell::new(),
+            cargo_exe: LazyCell::new(),
+            rustdoc: LazyCell::new(),
+            extra_verbose: false,
+            frozen: false,
+            locked: false,
+            jobserver: unsafe {
+                if GLOBAL_JOBSERVER.is_null() {
+                    None
+                } else {
+                    Some((*GLOBAL_JOBSERVER).clone())
+                }
+            },
+            cli_flags: CliUnstable::default(),
+            easy: LazyCell::new(),
+            crates_io_source_id: LazyCell::new(),
+            cache_rustc_info,
+            creation_time: Instant::now(),
+            target_dir: None,
+            env,
+            profiles: LazyCell::new(),
+        }
+    }
+
+    pub fn default() -> CargoResult<Config> {
+        let shell = Shell::new();
+        let cwd =
+            env::current_dir().chain_err(|| "couldn't get the current directory of the process")?;
+        let homedir = homedir(&cwd).ok_or_else(|| {
+            failure::format_err!(
+                "Cargo couldn't find your home directory. \
+                 This probably means that $HOME was not set."
+            )
+        })?;
+        Ok(Config::new(shell, cwd, homedir))
+    }
+
+    /// Gets the user's Cargo home directory (OS-dependent).
+    pub fn home(&self) -> &Filesystem {
+        &self.home_path
+    }
+
+    /// Gets the Cargo Git directory (`<cargo_home>/git`).
+    pub fn git_path(&self) -> Filesystem {
+        self.home_path.join("git")
+    }
+
+    /// Gets the Cargo registry index directory (`<cargo_home>/registry/index`).
+ pub fn registry_index_path(&self) -> Filesystem { + self.home_path.join("registry").join("index") + } + + /// Gets the Cargo registry cache directory (`/registry/path`). + pub fn registry_cache_path(&self) -> Filesystem { + self.home_path.join("registry").join("cache") + } + + /// Gets the Cargo registry source directory (`/registry/src`). + pub fn registry_source_path(&self) -> Filesystem { + self.home_path.join("registry").join("src") + } + + /// Gets the default Cargo registry. + pub fn default_registry(&self) -> CargoResult> { + Ok(match self.get_string("registry.default")? { + Some(registry) => Some(registry.val), + None => None, + }) + } + + /// Gets a reference to the shell, e.g., for writing error messages. + pub fn shell(&self) -> RefMut<'_, Shell> { + self.shell.borrow_mut() + } + + /// Gets the path to the `rustdoc` executable. + pub fn rustdoc(&self) -> CargoResult<&Path> { + self.rustdoc + .try_borrow_with(|| self.get_tool("rustdoc")) + .map(AsRef::as_ref) + } + + /// Gets the path to the `rustc` executable. + pub fn rustc(&self, ws: Option<&Workspace<'_>>) -> CargoResult { + let cache_location = ws.map(|ws| { + ws.target_dir() + .join(".rustc_info.json") + .into_path_unlocked() + }); + Rustc::new( + self.get_tool("rustc")?, + self.maybe_get_tool("rustc_wrapper")?, + &self + .home() + .join("bin") + .join("rustc") + .into_path_unlocked() + .with_extension(env::consts::EXE_EXTENSION), + if self.cache_rustc_info { + cache_location + } else { + None + }, + ) + } + + /// Gets the path to the `cargo` executable. + pub fn cargo_exe(&self) -> CargoResult<&Path> { + self.cargo_exe + .try_borrow_with(|| { + fn from_current_exe() -> CargoResult { + // Try fetching the path to `cargo` using `env::current_exe()`. + // The method varies per operating system and might fail; in particular, + // it depends on `/proc` being mounted on Linux, and some environments + // (like containers or chroots) may not have that available. + let exe = env::current_exe()?.canonicalize()?; + Ok(exe) + } + + fn from_argv() -> CargoResult { + // Grab `argv[0]` and attempt to resolve it to an absolute path. + // If `argv[0]` has one component, it must have come from a `PATH` lookup, + // so probe `PATH` in that case. + // Otherwise, it has multiple components and is either: + // - a relative path (e.g., `./cargo`, `target/debug/cargo`), or + // - an absolute path (e.g., `/usr/local/bin/cargo`). + // In either case, `Path::canonicalize` will return the full absolute path + // to the target if it exists. + let argv0 = env::args_os() + .map(PathBuf::from) + .next() + .ok_or_else(|| failure::format_err!("no argv[0]"))?; + paths::resolve_executable(&argv0) + } + + let exe = from_current_exe() + .or_else(|_| from_argv()) + .chain_err(|| "couldn't get the path to cargo executable")?; + Ok(exe) + }) + .map(AsRef::as_ref) + } + + pub fn profiles(&self) -> CargoResult<&ConfigProfiles> { + self.profiles.try_borrow_with(|| { + let ocp = self.get::>("profile")?; + if let Some(config_profiles) = ocp { + // Warn if config profiles without CLI option. + if !self.cli_unstable().config_profile { + self.shell().warn( + "profiles in config files require `-Z config-profile` \ + command-line option", + )?; + return Ok(ConfigProfiles::default()); + } + Ok(config_profiles) + } else { + Ok(ConfigProfiles::default()) + } + }) + } + + pub fn values(&self) -> CargoResult<&HashMap> { + self.values.try_borrow_with(|| self.load_values()) + } + + // Note: this is used by RLS, not Cargo. 
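+    // It lets an embedding tool seed configuration values programmatically
+    // instead of having them loaded from config files on disk.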
+ pub fn set_values(&self, values: HashMap) -> CargoResult<()> { + if self.values.borrow().is_some() { + failure::bail!("config values already found") + } + match self.values.fill(values) { + Ok(()) => Ok(()), + Err(_) => failure::bail!("could not fill values"), + } + } + + pub fn reload_rooted_at_cargo_home(&mut self) -> CargoResult<()> { + let home = self.home_path.clone().into_path_unlocked(); + let values = self.load_values_from(&home)?; + self.values.replace(values); + Ok(()) + } + + pub fn cwd(&self) -> &Path { + &self.cwd + } + + pub fn target_dir(&self) -> CargoResult> { + if let Some(ref dir) = self.target_dir { + Ok(Some(dir.clone())) + } else if let Some(dir) = env::var_os("CARGO_TARGET_DIR") { + Ok(Some(Filesystem::new(self.cwd.join(dir)))) + } else if let Some(val) = self.get_path("build.target-dir")? { + let val = self.cwd.join(val.val); + Ok(Some(Filesystem::new(val))) + } else { + Ok(None) + } + } + + fn get_cv(&self, key: &str) -> CargoResult> { + let vals = self.values()?; + let mut parts = key.split('.').enumerate(); + let mut val = match vals.get(parts.next().unwrap().1) { + Some(val) => val, + None => return Ok(None), + }; + for (i, part) in parts { + match *val { + CV::Table(ref map, _) => { + val = match map.get(part) { + Some(val) => val, + None => return Ok(None), + } + } + CV::Integer(_, ref path) + | CV::String(_, ref path) + | CV::List(_, ref path) + | CV::Boolean(_, ref path) => { + let idx = key.split('.').take(i).fold(0, |n, s| n + s.len()) + i - 1; + let key_so_far = &key[..idx]; + failure::bail!( + "expected table for configuration key `{}`, \ + but found {} in {}", + key_so_far, + val.desc(), + path.display() + ) + } + } + } + Ok(Some(val.clone())) + } + + // Helper primarily for testing. + pub fn set_env(&mut self, env: HashMap) { + self.env = env; + } + + fn get_env(&self, key: &ConfigKey) -> Result, ConfigError> + where + T: FromStr, + ::Err: fmt::Display, + { + let key = key.to_env(); + match self.env.get(&key) { + Some(value) => { + let definition = Definition::Environment(key); + Ok(Some(Value { + val: value + .parse() + .map_err(|e| ConfigError::new(format!("{}", e), definition.clone()))?, + definition, + })) + } + None => Ok(None), + } + } + + fn has_key(&self, key: &ConfigKey) -> bool { + let env_key = key.to_env(); + if self.env.get(&env_key).is_some() { + return true; + } + let env_pattern = format!("{}_", env_key); + if self.env.keys().any(|k| k.starts_with(&env_pattern)) { + return true; + } + if let Ok(o_cv) = self.get_cv(&key.to_config()) { + if o_cv.is_some() { + return true; + } + } + false + } + + pub fn get_string(&self, key: &str) -> CargoResult> { + self.get_string_priv(&ConfigKey::from_str(key)) + .map_err(|e| e.into()) + } + + fn get_string_priv(&self, key: &ConfigKey) -> Result, ConfigError> { + match self.get_env(key)? { + Some(v) => Ok(Some(v)), + None => { + let config_key = key.to_config(); + let o_cv = self.get_cv(&config_key)?; + match o_cv { + Some(CV::String(s, path)) => Ok(Some(Value { + val: s, + definition: Definition::Path(path), + })), + Some(cv) => Err(ConfigError::expected(&config_key, "a string", &cv)), + None => Ok(None), + } + } + } + } + + pub fn get_bool(&self, key: &str) -> CargoResult> { + self.get_bool_priv(&ConfigKey::from_str(key)) + .map_err(|e| e.into()) + } + + fn get_bool_priv(&self, key: &ConfigKey) -> Result, ConfigError> { + match self.get_env(key)? 
{ + Some(v) => Ok(Some(v)), + None => { + let config_key = key.to_config(); + let o_cv = self.get_cv(&config_key)?; + match o_cv { + Some(CV::Boolean(b, path)) => Ok(Some(Value { + val: b, + definition: Definition::Path(path), + })), + Some(cv) => Err(ConfigError::expected(&config_key, "true/false", &cv)), + None => Ok(None), + } + } + } + } + + fn string_to_path(&self, value: String, definition: &Definition) -> PathBuf { + let is_path = value.contains('/') || (cfg!(windows) && value.contains('\\')); + if is_path { + definition.root(self).join(value) + } else { + // A pathless name. + PathBuf::from(value) + } + } + + pub fn get_path(&self, key: &str) -> CargoResult> { + if let Some(val) = self.get_string(key)? { + Ok(Some(Value { + val: self.string_to_path(val.val, &val.definition), + definition: val.definition, + })) + } else { + Ok(None) + } + } + + pub fn get_path_and_args(&self, key: &str) -> CargoResult)>> { + if let Some(mut val) = self.get_list_or_split_string(key)? { + if !val.val.is_empty() { + return Ok(Some(Value { + val: ( + self.string_to_path(val.val.remove(0), &val.definition), + val.val, + ), + definition: val.definition, + })); + } + } + Ok(None) + } + + // NOTE: this does **not** support environment variables. Use `get` instead + // if you want that. + pub fn get_list(&self, key: &str) -> CargoResult>> { + match self.get_cv(key)? { + Some(CV::List(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(val) => self.expected("list", key, &val), + None => Ok(None), + } + } + + pub fn get_list_or_split_string(&self, key: &str) -> CargoResult>> { + if let Some(value) = self.get_env::(&ConfigKey::from_str(key))? { + return Ok(Some(Value { + val: value.val.split(' ').map(str::to_string).collect(), + definition: value.definition, + })); + } + + match self.get_cv(key)? { + Some(CV::List(i, path)) => Ok(Some(Value { + val: i.into_iter().map(|(s, _)| s).collect(), + definition: Definition::Path(path), + })), + Some(CV::String(i, path)) => Ok(Some(Value { + val: i.split(' ').map(str::to_string).collect(), + definition: Definition::Path(path), + })), + Some(val) => self.expected("list or string", key, &val), + None => Ok(None), + } + } + + pub fn get_table(&self, key: &str) -> CargoResult>> { + match self.get_cv(key)? { + Some(CV::Table(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(val) => self.expected("table", key, &val), + None => Ok(None), + } + } + + // Recommended to use `get` if you want a specific type, such as an unsigned value. + // Example: `config.get::>("some.key")?`. + pub fn get_i64(&self, key: &str) -> CargoResult> { + self.get_integer(&ConfigKey::from_str(key)) + .map_err(|e| e.into()) + } + + fn get_integer(&self, key: &ConfigKey) -> Result, ConfigError> { + let config_key = key.to_config(); + match self.get_env::(key)? { + Some(v) => Ok(Some(v)), + None => match self.get_cv(&config_key)? 
{ + Some(CV::Integer(i, path)) => Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })), + Some(cv) => Err(ConfigError::expected(&config_key, "an integer", &cv)), + None => Ok(None), + }, + } + } + + fn expected(&self, ty: &str, key: &str, val: &CV) -> CargoResult { + val.expected(ty, key) + .map_err(|e| failure::format_err!("invalid configuration for key `{}`\n{}", key, e)) + } + + pub fn configure( + &mut self, + verbose: u32, + quiet: Option, + color: &Option, + frozen: bool, + locked: bool, + target_dir: &Option, + unstable_flags: &[String], + ) -> CargoResult<()> { + let extra_verbose = verbose >= 2; + let verbose = if verbose == 0 { None } else { Some(true) }; + + // Ignore errors in the configuration files. + let cfg_verbose = self.get_bool("term.verbose").unwrap_or(None).map(|v| v.val); + let cfg_color = self.get_string("term.color").unwrap_or(None).map(|v| v.val); + + let color = color.as_ref().or_else(|| cfg_color.as_ref()); + + let verbosity = match (verbose, cfg_verbose, quiet) { + (Some(true), _, None) | (None, Some(true), None) => Verbosity::Verbose, + + // Command line takes precedence over configuration, so ignore the + // configuration.. + (None, _, Some(true)) => Verbosity::Quiet, + + // Can't pass both at the same time on the command line regardless + // of configuration. + (Some(true), _, Some(true)) => { + failure::bail!("cannot set both --verbose and --quiet"); + } + + // Can't actually get `Some(false)` as a value from the command + // line, so just ignore them here to appease exhaustiveness checking + // in match statements. + (Some(false), _, _) + | (_, _, Some(false)) + | (None, Some(false), None) + | (None, None, None) => Verbosity::Normal, + }; + + let cli_target_dir = match target_dir.as_ref() { + Some(dir) => Some(Filesystem::new(dir.clone())), + None => None, + }; + + self.shell().set_verbosity(verbosity); + self.shell().set_color_choice(color.map(|s| &s[..]))?; + self.extra_verbose = extra_verbose; + self.frozen = frozen; + self.locked = locked; + self.target_dir = cli_target_dir; + self.cli_flags.parse(unstable_flags)?; + + Ok(()) + } + + pub fn cli_unstable(&self) -> &CliUnstable { + &self.cli_flags + } + + pub fn extra_verbose(&self) -> bool { + self.extra_verbose + } + + pub fn network_allowed(&self) -> bool { + !self.frozen() && !self.cli_unstable().offline + } + + pub fn frozen(&self) -> bool { + self.frozen + } + + pub fn lock_update_allowed(&self) -> bool { + !self.frozen && !self.locked + } + + /// Loads configuration from the filesystem. 
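+ ///
+ /// A sketch of the resulting lookup order (paths invented): with
+ /// `cwd = /proj/member`, values are merged from
+ /// `/proj/member/.cargo/config`, then `/proj/.cargo/config`, and so on up
+ /// the ancestor chain, with `$CARGO_HOME/config` consulted last; for
+ /// scalar keys the file closest to `cwd` wins.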
+ pub fn load_values(&self) -> CargoResult> { + self.load_values_from(&self.cwd) + } + + fn load_values_from(&self, path: &Path) -> CargoResult> { + let mut cfg = CV::Table(HashMap::new(), PathBuf::from(".")); + let home = self.home_path.clone().into_path_unlocked(); + + walk_tree(path, &home, |path| { + let mut contents = String::new(); + let mut file = File::open(&path)?; + file.read_to_string(&mut contents) + .chain_err(|| format!("failed to read configuration file `{}`", path.display()))?; + let toml = cargo_toml::parse(&contents, path, self).chain_err(|| { + format!("could not parse TOML configuration in `{}`", path.display()) + })?; + let value = CV::from_toml(path, toml).chain_err(|| { + format!( + "failed to load TOML configuration from `{}`", + path.display() + ) + })?; + cfg.merge(value) + .chain_err(|| format!("failed to merge configuration at `{}`", path.display()))?; + Ok(()) + }) + .chain_err(|| "could not load Cargo configuration")?; + + self.load_credentials(&mut cfg)?; + match cfg { + CV::Table(map, _) => Ok(map), + _ => unreachable!(), + } + } + + /// Gets the index for a registry. + pub fn get_registry_index(&self, registry: &str) -> CargoResult { + validate_package_name(registry, "registry name", "")?; + Ok( + match self.get_string(&format!("registries.{}.index", registry))? { + Some(index) => { + let url = index.val.to_url()?; + if url.password().is_some() { + failure::bail!("Registry URLs may not contain passwords"); + } + url + } + None => failure::bail!("No index found for registry: `{}`", registry), + }, + ) + } + + /// Loads credentials config from the credentials file into the `ConfigValue` object, if + /// present. + fn load_credentials(&self, cfg: &mut ConfigValue) -> CargoResult<()> { + let home_path = self.home_path.clone().into_path_unlocked(); + let credentials = home_path.join("credentials"); + if fs::metadata(&credentials).is_err() { + return Ok(()); + } + + let mut contents = String::new(); + let mut file = File::open(&credentials)?; + file.read_to_string(&mut contents).chain_err(|| { + format!( + "failed to read configuration file `{}`", + credentials.display() + ) + })?; + + let toml = cargo_toml::parse(&contents, &credentials, self).chain_err(|| { + format!( + "could not parse TOML configuration in `{}`", + credentials.display() + ) + })?; + + let mut value = CV::from_toml(&credentials, toml).chain_err(|| { + format!( + "failed to load TOML configuration from `{}`", + credentials.display() + ) + })?; + + // Backwards compatibility for old `.cargo/credentials` layout. + { + let value = match value { + CV::Table(ref mut value, _) => value, + _ => unreachable!(), + }; + + if let Some(token) = value.remove("token") { + if let Vacant(entry) = value.entry("registry".into()) { + let mut map = HashMap::new(); + map.insert("token".into(), token); + let table = CV::Table(map, PathBuf::from(".")); + entry.insert(table); + } + } + } + + // We want value to override `cfg`, so swap these. + mem::swap(cfg, &mut value); + cfg.merge(value)?; + + Ok(()) + } + + /// Looks for a path for `tool` in an environment variable or config path, and returns `None` + /// if it's not present. 
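+ ///
+ /// Resolution order, sketched for `tool = "rustc"`:
+ ///
+ ///     // 1. the `RUSTC` environment variable (the uppercased tool name),
+ ///     //    joined onto `cwd` when it looks like a relative path
+ ///     // 2. the `build.rustc` configuration key
+ ///     // 3. otherwise `None`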
+ fn maybe_get_tool(&self, tool: &str) -> CargoResult> { + let var = tool + .chars() + .flat_map(|c| c.to_uppercase()) + .collect::(); + if let Some(tool_path) = env::var_os(&var) { + let maybe_relative = match tool_path.to_str() { + Some(s) => s.contains('/') || s.contains('\\'), + None => false, + }; + let path = if maybe_relative { + self.cwd.join(tool_path) + } else { + PathBuf::from(tool_path) + }; + return Ok(Some(path)); + } + + let var = format!("build.{}", tool); + if let Some(tool_path) = self.get_path(&var)? { + return Ok(Some(tool_path.val)); + } + + Ok(None) + } + + /// Looks for a path for `tool` in an environment variable or config path, defaulting to `tool` + /// as a path. + fn get_tool(&self, tool: &str) -> CargoResult { + self.maybe_get_tool(tool) + .map(|t| t.unwrap_or_else(|| PathBuf::from(tool))) + } + + pub fn jobserver_from_env(&self) -> Option<&jobserver::Client> { + self.jobserver.as_ref() + } + + pub fn http(&self) -> CargoResult<&RefCell> { + let http = self + .easy + .try_borrow_with(|| ops::http_handle(self).map(RefCell::new))?; + { + let mut http = http.borrow_mut(); + http.reset(); + let timeout = ops::configure_http_handle(self, &mut http)?; + timeout.configure(&mut http)?; + } + Ok(http) + } + + pub fn crates_io_source_id(&self, f: F) -> CargoResult + where + F: FnMut() -> CargoResult, + { + Ok(*(self.crates_io_source_id.try_borrow_with(f)?)) + } + + pub fn creation_time(&self) -> Instant { + self.creation_time + } + + // Retrieves a config variable. + // + // This supports most serde `Deserialize` types. Examples: + // + // let v: Option = config.get("some.nested.key")?; + // let v: Option = config.get("some.key")?; + // let v: Option> = config.get("foo")?; + pub fn get<'de, T: de::Deserialize<'de>>(&self, key: &str) -> CargoResult { + let d = Deserializer { + config: self, + key: ConfigKey::from_str(key), + }; + T::deserialize(d).map_err(|e| e.into()) + } +} + +/// A segment of a config key. +/// +/// Config keys are split on dots for regular keys, or underscores for +/// environment keys. +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +enum ConfigKeyPart { + /// Case-insensitive part (checks uppercase in environment keys). + Part(String), + /// Case-sensitive part (environment keys must match exactly). + CasePart(String), +} + +impl ConfigKeyPart { + fn to_env(&self) -> String { + match self { + ConfigKeyPart::Part(s) => s.replace("-", "_").to_uppercase(), + ConfigKeyPart::CasePart(s) => s.clone(), + } + } + + fn to_config(&self) -> String { + match self { + ConfigKeyPart::Part(s) => s.clone(), + ConfigKeyPart::CasePart(s) => s.clone(), + } + } +} + +/// Key for a configuration variable. +#[derive(Debug, Clone)] +struct ConfigKey(Vec); + +impl ConfigKey { + fn from_str(key: &str) -> ConfigKey { + ConfigKey( + key.split('.') + .map(|p| ConfigKeyPart::Part(p.to_string())) + .collect(), + ) + } + + fn join(&self, next: ConfigKeyPart) -> ConfigKey { + let mut res = self.clone(); + res.0.push(next); + res + } + + fn to_env(&self) -> String { + format!( + "CARGO_{}", + self.0 + .iter() + .map(|p| p.to_env()) + .collect::>() + .join("_") + ) + } + + fn to_config(&self) -> String { + self.0 + .iter() + .map(|p| p.to_config()) + .collect::>() + .join(".") + } + + fn last(&self) -> &ConfigKeyPart { + self.0.last().unwrap() + } +} + +impl fmt::Display for ConfigKey { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.to_config().fmt(f) + } +} + +/// Internal error for serde errors. 
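+///
+/// Carries the underlying failure plus an optional `Definition` (the config
+/// file path or environment variable the value came from), so messages can
+/// point at the offending source.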
+#[derive(Debug)] +pub struct ConfigError { + error: failure::Error, + definition: Option, +} + +impl ConfigError { + fn new(message: String, definition: Definition) -> ConfigError { + ConfigError { + error: failure::err_msg(message), + definition: Some(definition), + } + } + + fn expected(key: &str, expected: &str, found: &ConfigValue) -> ConfigError { + ConfigError { + error: failure::format_err!( + "`{}` expected {}, but found a {}", + key, + expected, + found.desc() + ), + definition: Some(Definition::Path(found.definition_path().to_path_buf())), + } + } + + fn missing(key: &str) -> ConfigError { + ConfigError { + error: failure::format_err!("missing config key `{}`", key), + definition: None, + } + } + + fn with_key_context(self, key: &str, definition: Definition) -> ConfigError { + ConfigError { + error: failure::format_err!("could not load config key `{}`: {}", key, self), + definition: Some(definition), + } + } +} + +impl std::error::Error for ConfigError { +} + +// Future note: currently, we cannot override `Fail::cause` (due to +// specialization) so we have no way to return the underlying causes. In the +// future, once this limitation is lifted, this should instead implement +// `cause` and avoid doing the cause formatting here. +impl fmt::Display for ConfigError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let message = self + .error + .iter_chain() + .map(|e| e.to_string()) + .collect::>() + .join("\nCaused by:\n "); + if let Some(ref definition) = self.definition { + write!(f, "error in {}: {}", definition, message) + } else { + message.fmt(f) + } + } +} + +impl de::Error for ConfigError { + fn custom(msg: T) -> Self { + ConfigError { + error: failure::err_msg(msg.to_string()), + definition: None, + } + } +} + +impl From for ConfigError { + fn from(error: failure::Error) -> Self { + ConfigError { + error, + definition: None, + } + } +} + +/// Serde deserializer used to convert config values to a target type using +/// `Config::get`. +pub struct Deserializer<'config> { + config: &'config Config, + key: ConfigKey, +} + +macro_rules! deserialize_method { + ($method:ident, $visit:ident, $getter:ident) => { + fn $method(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + let v = self.config.$getter(&self.key)?.ok_or_else(|| + ConfigError::missing(&self.key.to_config()))?; + let Value{val, definition} = v; + let res: Result = visitor.$visit(val); + res.map_err(|e| e.with_key_context(&self.key.to_config(), definition)) + } + } +} + +impl<'de, 'config> de::Deserializer<'de> for Deserializer<'config> { + type Error = ConfigError; + + fn deserialize_any(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + // Future note: If you ever need to deserialize a non-self describing + // map type, this should implement a starts_with check (similar to how + // ConfigMapAccess does). + if let Some(v) = self.config.env.get(&self.key.to_env()) { + let res: Result = if v == "true" || v == "false" { + visitor.visit_bool(v.parse().unwrap()) + } else if let Ok(v) = v.parse::() { + visitor.visit_i64(v) + } else if self.config.cli_unstable().advanced_env + && v.starts_with('[') + && v.ends_with(']') + { + visitor.visit_seq(ConfigSeqAccess::new(self.config, &self.key)?) 
+ } else { + visitor.visit_string(v.clone()) + }; + return res.map_err(|e| { + e.with_key_context( + &self.key.to_config(), + Definition::Environment(self.key.to_env()), + ) + }); + } + + let o_cv = self.config.get_cv(&self.key.to_config())?; + if let Some(cv) = o_cv { + let res: (Result, PathBuf) = match cv { + CV::Integer(i, path) => (visitor.visit_i64(i), path), + CV::String(s, path) => (visitor.visit_string(s), path), + CV::List(_, path) => ( + visitor.visit_seq(ConfigSeqAccess::new(self.config, &self.key)?), + path, + ), + CV::Table(_, path) => ( + visitor.visit_map(ConfigMapAccess::new_map(self.config, self.key.clone())?), + path, + ), + CV::Boolean(b, path) => (visitor.visit_bool(b), path), + }; + let (res, path) = res; + return res + .map_err(|e| e.with_key_context(&self.key.to_config(), Definition::Path(path))); + } + Err(ConfigError::missing(&self.key.to_config())) + } + + deserialize_method!(deserialize_bool, visit_bool, get_bool_priv); + deserialize_method!(deserialize_i8, visit_i64, get_integer); + deserialize_method!(deserialize_i16, visit_i64, get_integer); + deserialize_method!(deserialize_i32, visit_i64, get_integer); + deserialize_method!(deserialize_i64, visit_i64, get_integer); + deserialize_method!(deserialize_u8, visit_i64, get_integer); + deserialize_method!(deserialize_u16, visit_i64, get_integer); + deserialize_method!(deserialize_u32, visit_i64, get_integer); + deserialize_method!(deserialize_u64, visit_i64, get_integer); + deserialize_method!(deserialize_string, visit_string, get_string_priv); + + fn deserialize_option(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + if self.config.has_key(&self.key) { + visitor.visit_some(self) + } else { + // Treat missing values as `None`. + visitor.visit_none() + } + } + + fn deserialize_struct( + self, + _name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_map(ConfigMapAccess::new_struct(self.config, self.key, fields)?) + } + + fn deserialize_map(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_map(ConfigMapAccess::new_map(self.config, self.key)?) + } + + fn deserialize_seq(self, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_seq(ConfigSeqAccess::new(self.config, &self.key)?) + } + + fn deserialize_tuple(self, _len: usize, visitor: V) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_seq(ConfigSeqAccess::new(self.config, &self.key)?) + } + + fn deserialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + visitor.visit_seq(ConfigSeqAccess::new(self.config, &self.key)?) + } + + fn deserialize_newtype_struct( + self, + name: &'static str, + visitor: V, + ) -> Result + where + V: de::Visitor<'de>, + { + if name == "ConfigRelativePath" { + match self.config.get_string_priv(&self.key)? { + Some(v) => { + let path = v + .definition + .root(self.config) + .join(v.val) + .display() + .to_string(); + visitor.visit_newtype_struct(path.into_deserializer()) + } + None => Err(ConfigError::missing(&self.key.to_config())), + } + } else { + visitor.visit_newtype_struct(self) + } + } + + // These aren't really supported, yet. + serde::forward_to_deserialize_any! 
{ + f32 f64 char str bytes + byte_buf unit unit_struct + enum identifier ignored_any + } +} + +struct ConfigMapAccess<'config> { + config: &'config Config, + key: ConfigKey, + set_iter: as IntoIterator>::IntoIter, + next: Option, +} + +impl<'config> ConfigMapAccess<'config> { + fn new_map( + config: &'config Config, + key: ConfigKey, + ) -> Result, ConfigError> { + let mut set = HashSet::new(); + if let Some(mut v) = config.get_table(&key.to_config())? { + // `v: Value>` + for (key, _value) in v.val.drain() { + set.insert(ConfigKeyPart::CasePart(key)); + } + } + if config.cli_unstable().advanced_env { + // `CARGO_PROFILE_DEV_OVERRIDES_` + let env_pattern = format!("{}_", key.to_env()); + for env_key in config.env.keys() { + if env_key.starts_with(&env_pattern) { + // `CARGO_PROFILE_DEV_OVERRIDES_bar_OPT_LEVEL = 3` + let rest = &env_key[env_pattern.len()..]; + // `rest = bar_OPT_LEVEL` + let part = rest.splitn(2, '_').next().unwrap(); + // `part = "bar"` + set.insert(ConfigKeyPart::CasePart(part.to_string())); + } + } + } + Ok(ConfigMapAccess { + config, + key, + set_iter: set.into_iter(), + next: None, + }) + } + + fn new_struct( + config: &'config Config, + key: ConfigKey, + fields: &'static [&'static str], + ) -> Result, ConfigError> { + let mut set = HashSet::new(); + for field in fields { + set.insert(ConfigKeyPart::Part(field.to_string())); + } + if let Some(mut v) = config.get_table(&key.to_config())? { + for (t_key, value) in v.val.drain() { + let part = ConfigKeyPart::Part(t_key); + if !set.contains(&part) { + config.shell().warn(format!( + "unused key `{}` in config file `{}`", + key.join(part).to_config(), + value.definition_path().display() + ))?; + } + } + } + Ok(ConfigMapAccess { + config, + key, + set_iter: set.into_iter(), + next: None, + }) + } +} + +impl<'de, 'config> de::MapAccess<'de> for ConfigMapAccess<'config> { + type Error = ConfigError; + + fn next_key_seed(&mut self, seed: K) -> Result, Self::Error> + where + K: de::DeserializeSeed<'de>, + { + match self.set_iter.next() { + Some(key) => { + let de_key = key.to_config(); + self.next = Some(key); + seed.deserialize(de_key.into_deserializer()).map(Some) + } + None => Ok(None), + } + } + + fn next_value_seed(&mut self, seed: V) -> Result + where + V: de::DeserializeSeed<'de>, + { + let next_key = self.next.take().expect("next field missing"); + let next_key = self.key.join(next_key); + seed.deserialize(Deserializer { + config: self.config, + key: next_key, + }) + } +} + +struct ConfigSeqAccess { + list_iter: vec::IntoIter<(String, Definition)>, +} + +impl ConfigSeqAccess { + fn new(config: &Config, key: &ConfigKey) -> Result { + let mut res = Vec::new(); + if let Some(v) = config.get_list(&key.to_config())? { + for (s, path) in v.val { + res.push((s, Definition::Path(path))); + } + } + + if config.cli_unstable().advanced_env { + // Parse an environment string as a TOML array. 
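+ // Hedged example (gated on `-Z advanced-env`): an environment value such
+ // as `CARGO_BUILD_RUSTFLAGS='["-C", "lto"]'` is re-wrapped below as the
+ // one-line TOML document `RUSTFLAGS=["-C", "lto"]` and parsed with the
+ // TOML deserializer; only string elements are accepted for now.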
+ let env_key = key.to_env(); + let def = Definition::Environment(env_key.clone()); + if let Some(v) = config.env.get(&env_key) { + if !(v.starts_with('[') && v.ends_with(']')) { + return Err(ConfigError::new( + format!("should have TOML list syntax, found `{}`", v), + def, + )); + } + let temp_key = key.last().to_env(); + let toml_s = format!("{}={}", temp_key, v); + let toml_v: toml::Value = toml::de::from_str(&toml_s).map_err(|e| { + ConfigError::new(format!("could not parse TOML list: {}", e), def.clone()) + })?; + let values = toml_v + .as_table() + .unwrap() + .get(&temp_key) + .unwrap() + .as_array() + .expect("env var was not array"); + for value in values { + // TODO: support other types. + let s = value.as_str().ok_or_else(|| { + ConfigError::new( + format!("expected string, found {}", value.type_str()), + def.clone(), + ) + })?; + res.push((s.to_string(), def.clone())); + } + } + } + Ok(ConfigSeqAccess { + list_iter: res.into_iter(), + }) + } +} + +impl<'de> de::SeqAccess<'de> for ConfigSeqAccess { + type Error = ConfigError; + + fn next_element_seed(&mut self, seed: T) -> Result, Self::Error> + where + T: de::DeserializeSeed<'de>, + { + match self.list_iter.next() { + // TODO: add `def` to error? + Some((value, _def)) => seed.deserialize(value.into_deserializer()).map(Some), + None => Ok(None), + } + } +} + +/// Use with the `get` API to fetch a string that will be converted to a +/// `PathBuf`. Relative paths are converted to absolute paths based on the +/// location of the config file. +#[derive(Debug, Eq, PartialEq, Clone, Deserialize)] +pub struct ConfigRelativePath(PathBuf); + +impl ConfigRelativePath { + pub fn path(self) -> PathBuf { + self.0 + } +} + +#[derive(Eq, PartialEq, Clone)] +pub enum ConfigValue { + Integer(i64, PathBuf), + String(String, PathBuf), + List(Vec<(String, PathBuf)>, PathBuf), + Table(HashMap, PathBuf), + Boolean(bool, PathBuf), +} + +pub struct Value { + pub val: T, + pub definition: Definition, +} + +pub type OptValue = Option>; + +#[derive(Clone, Debug)] +pub enum Definition { + Path(PathBuf), + Environment(String), +} + +impl fmt::Debug for ConfigValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + CV::Integer(i, ref path) => write!(f, "{} (from {})", i, path.display()), + CV::Boolean(b, ref path) => write!(f, "{} (from {})", b, path.display()), + CV::String(ref s, ref path) => write!(f, "{} (from {})", s, path.display()), + CV::List(ref list, ref path) => { + write!(f, "[")?; + for (i, &(ref s, ref path)) in list.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{} (from {})", s, path.display())?; + } + write!(f, "] (from {})", path.display()) + } + CV::Table(ref table, _) => write!(f, "{:?}", table), + } + } +} + +impl ConfigValue { + fn from_toml(path: &Path, toml: toml::Value) -> CargoResult { + match toml { + toml::Value::String(val) => Ok(CV::String(val, path.to_path_buf())), + toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.to_path_buf())), + toml::Value::Integer(i) => Ok(CV::Integer(i, path.to_path_buf())), + toml::Value::Array(val) => Ok(CV::List( + val.into_iter() + .map(|toml| match toml { + toml::Value::String(val) => Ok((val, path.to_path_buf())), + v => failure::bail!("expected string but found {} in list", v.type_str()), + }) + .collect::>()?, + path.to_path_buf(), + )), + toml::Value::Table(val) => Ok(CV::Table( + val.into_iter() + .map(|(key, value)| { + let value = CV::from_toml(path, value) + .chain_err(|| format!("failed to parse key `{}`", key))?; + Ok((key, value)) + 
}) + .collect::>()?, + path.to_path_buf(), + )), + v => failure::bail!( + "found TOML configuration value of unknown type `{}`", + v.type_str() + ), + } + } + + fn into_toml(self) -> toml::Value { + match self { + CV::Boolean(s, _) => toml::Value::Boolean(s), + CV::String(s, _) => toml::Value::String(s), + CV::Integer(i, _) => toml::Value::Integer(i), + CV::List(l, _) => { + toml::Value::Array(l.into_iter().map(|(s, _)| toml::Value::String(s)).collect()) + } + CV::Table(l, _) => { + toml::Value::Table(l.into_iter().map(|(k, v)| (k, v.into_toml())).collect()) + } + } + } + + fn merge(&mut self, from: ConfigValue) -> CargoResult<()> { + match (self, from) { + (&mut CV::List(ref mut old, _), CV::List(ref mut new, _)) => { + let new = mem::replace(new, Vec::new()); + old.extend(new.into_iter()); + } + (&mut CV::Table(ref mut old, _), CV::Table(ref mut new, _)) => { + let new = mem::replace(new, HashMap::new()); + for (key, value) in new { + match old.entry(key.clone()) { + Occupied(mut entry) => { + let path = value.definition_path().to_path_buf(); + let entry = entry.get_mut(); + entry.merge(value).chain_err(|| { + format!( + "failed to merge key `{}` between \ + files:\n \ + file 1: {}\n \ + file 2: {}", + key, + entry.definition_path().display(), + path.display() + ) + })?; + } + Vacant(entry) => { + entry.insert(value); + } + }; + } + } + // Allow switching types except for tables or arrays. + (expected @ &mut CV::List(_, _), found) + | (expected @ &mut CV::Table(_, _), found) + | (expected, found @ CV::List(_, _)) + | (expected, found @ CV::Table(_, _)) => { + return Err(internal(format!( + "expected {}, but found {}", + expected.desc(), + found.desc() + ))); + } + _ => {} + } + + Ok(()) + } + + pub fn i64(&self, key: &str) -> CargoResult<(i64, &Path)> { + match *self { + CV::Integer(i, ref p) => Ok((i, p)), + _ => self.expected("integer", key), + } + } + + pub fn string(&self, key: &str) -> CargoResult<(&str, &Path)> { + match *self { + CV::String(ref s, ref p) => Ok((s, p)), + _ => self.expected("string", key), + } + } + + pub fn table(&self, key: &str) -> CargoResult<(&HashMap, &Path)> { + match *self { + CV::Table(ref table, ref p) => Ok((table, p)), + _ => self.expected("table", key), + } + } + + pub fn list(&self, key: &str) -> CargoResult<&[(String, PathBuf)]> { + match *self { + CV::List(ref list, _) => Ok(list), + _ => self.expected("list", key), + } + } + + pub fn boolean(&self, key: &str) -> CargoResult<(bool, &Path)> { + match *self { + CV::Boolean(b, ref p) => Ok((b, p)), + _ => self.expected("bool", key), + } + } + + pub fn desc(&self) -> &'static str { + match *self { + CV::Table(..) => "table", + CV::List(..) => "array", + CV::String(..) => "string", + CV::Boolean(..) => "boolean", + CV::Integer(..) 
=> "integer", + } + } + + pub fn definition_path(&self) -> &Path { + match *self { + CV::Boolean(_, ref p) + | CV::Integer(_, ref p) + | CV::String(_, ref p) + | CV::List(_, ref p) + | CV::Table(_, ref p) => p, + } + } + + fn expected(&self, wanted: &str, key: &str) -> CargoResult { + failure::bail!( + "expected a {}, but found a {} for `{}` in {}", + wanted, + self.desc(), + key, + self.definition_path().display() + ) + } +} + +impl Definition { + pub fn root<'a>(&'a self, config: &'a Config) -> &'a Path { + match *self { + Definition::Path(ref p) => p.parent().unwrap().parent().unwrap(), + Definition::Environment(_) => config.cwd(), + } + } +} + +impl fmt::Display for Definition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Definition::Path(ref p) => p.display().fmt(f), + Definition::Environment(ref key) => write!(f, "environment variable `{}`", key), + } + } +} + +pub fn homedir(cwd: &Path) -> Option { + ::home::cargo_home_with_cwd(cwd).ok() +} + +fn walk_tree(pwd: &Path, home: &Path, mut walk: F) -> CargoResult<()> +where + F: FnMut(&Path) -> CargoResult<()>, +{ + let mut stash: HashSet = HashSet::new(); + + for current in paths::ancestors(pwd) { + let possible = current.join(".cargo").join("config"); + if fs::metadata(&possible).is_ok() { + walk(&possible)?; + stash.insert(possible); + } + } + + // Once we're done, also be sure to walk the home directory even if it's not + // in our history to be sure we pick up that standard location for + // information. + let config = home.join("config"); + if !stash.contains(&config) && fs::metadata(&config).is_ok() { + walk(&config)?; + } + + Ok(()) +} + +pub fn save_credentials(cfg: &Config, token: String, registry: Option) -> CargoResult<()> { + let mut file = { + cfg.home_path.create_dir()?; + cfg.home_path + .open_rw(Path::new("credentials"), cfg, "credentials' config file")? + }; + + let (key, value) = { + let key = "token".to_string(); + let value = ConfigValue::String(token, file.path().to_path_buf()); + let mut map = HashMap::new(); + map.insert(key, value); + let table = CV::Table(map, file.path().to_path_buf()); + + if let Some(registry) = registry { + let mut map = HashMap::new(); + map.insert(registry, table); + ( + "registries".into(), + CV::Table(map, file.path().to_path_buf()), + ) + } else { + ("registry".into(), table) + } + }; + + let mut contents = String::new(); + file.read_to_string(&mut contents).chain_err(|| { + format!( + "failed to read configuration file `{}`", + file.path().display() + ) + })?; + + let mut toml = cargo_toml::parse(&contents, file.path(), cfg)?; + + // Move the old token location to the new one. 
+ if let Some(token) = toml.as_table_mut().unwrap().remove("token") { + let mut map = HashMap::new(); + map.insert("token".to_string(), token); + toml.as_table_mut() + .unwrap() + .insert("registry".into(), map.into()); + } + + toml.as_table_mut().unwrap().insert(key, value.into_toml()); + + let contents = toml.to_string(); + file.seek(SeekFrom::Start(0))?; + file.write_all(contents.as_bytes())?; + file.file().set_len(contents.len() as u64)?; + set_permissions(file.file(), 0o600)?; + + return Ok(()); + + #[cfg(unix)] + fn set_permissions(file: &File, mode: u32) -> CargoResult<()> { + use std::os::unix::fs::PermissionsExt; + + let mut perms = file.metadata()?.permissions(); + perms.set_mode(mode); + file.set_permissions(perms)?; + Ok(()) + } + + #[cfg(not(unix))] + #[allow(unused)] + fn set_permissions(file: &File, mode: u32) -> CargoResult<()> { + Ok(()) + } +} diff --git a/src/cargo/util/dependency_queue.rs b/src/cargo/util/dependency_queue.rs new file mode 100644 index 000000000..f8ef19e8f --- /dev/null +++ b/src/cargo/util/dependency_queue.rs @@ -0,0 +1,233 @@ +//! A graph-like structure used to represent a set of dependencies and in what +//! order they should be built. +//! +//! This structure is used to store the dependency graph and dynamically update +//! it to figure out when a dependency should be built. + +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::{HashMap, HashSet}; +use std::hash::Hash; + +pub use self::Freshness::{Dirty, Fresh}; + +#[derive(Debug)] +pub struct DependencyQueue { + /// A list of all known keys to build. + /// + /// The value of the hash map is list of dependencies which still need to be + /// built before the package can be built. Note that the set is dynamically + /// updated as more dependencies are built. + dep_map: HashMap, V)>, + + /// A reverse mapping of a package to all packages that depend on that + /// package. + /// + /// This map is statically known and does not get updated throughout the + /// lifecycle of the DependencyQueue. + reverse_dep_map: HashMap>, + + /// A set of dirty packages. + /// + /// Packages may become dirty over time if their dependencies are rebuilt. + dirty: HashSet, + + /// The packages which are currently being built, waiting for a call to + /// `finish`. + pending: HashSet, + + /// Topological depth of each key + depth: HashMap, +} + +/// Indication of the freshness of a package. +/// +/// A fresh package does not necessarily need to be rebuilt (unless a dependency +/// was also rebuilt), and a dirty package must always be rebuilt. +#[derive(PartialEq, Eq, Debug, Clone, Copy)] +pub enum Freshness { + Fresh, + Dirty, +} + +impl Freshness { + pub fn combine(self, other: Freshness) -> Freshness { + match self { + Fresh => other, + Dirty => Dirty, + } + } +} + +impl Default for DependencyQueue { + fn default() -> DependencyQueue { + DependencyQueue::new() + } +} + +impl DependencyQueue { + /// Creates a new dependency queue with 0 packages. + pub fn new() -> DependencyQueue { + DependencyQueue { + dep_map: HashMap::new(), + reverse_dep_map: HashMap::new(), + dirty: HashSet::new(), + pending: HashSet::new(), + depth: HashMap::new(), + } + } + + /// Adds a new package to this dependency queue. + /// + /// It is assumed that any dependencies of this package will eventually also + /// be added to the dependency queue. 
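+ ///
+ /// A sketch mirroring the test at the bottom of this file (integer keys,
+ /// unit payloads):
+ ///
+ ///     let mut q = DependencyQueue::new();
+ ///     q.queue(Fresh, &1, (), &[]); // leaf: no dependencies
+ ///     q.queue(Fresh, &2, (), &[1]); // depends on `1`
+ ///     q.queue_finished();
+ ///     assert_eq!(q.dequeue(), Some((Fresh, 1, ()))); // deepest ready key first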
+ pub fn queue(&mut self, fresh: Freshness, key: &K, value: V, dependencies: &[K]) -> &mut V { + let slot = match self.dep_map.entry(key.clone()) { + Occupied(v) => return &mut v.into_mut().1, + Vacant(v) => v, + }; + + if fresh == Dirty { + self.dirty.insert(key.clone()); + } + + let mut my_dependencies = HashSet::new(); + for dep in dependencies { + my_dependencies.insert(dep.clone()); + let rev = self + .reverse_dep_map + .entry(dep.clone()) + .or_insert_with(HashSet::new); + rev.insert(key.clone()); + } + &mut slot.insert((my_dependencies, value)).1 + } + + /// All nodes have been added, calculate some internal metadata and prepare + /// for `dequeue`. + pub fn queue_finished(&mut self) { + for key in self.dep_map.keys() { + depth(key, &self.reverse_dep_map, &mut self.depth); + } + + fn depth( + key: &K, + map: &HashMap>, + results: &mut HashMap, + ) -> usize { + const IN_PROGRESS: usize = !0; + + if let Some(&depth) = results.get(key) { + assert_ne!(depth, IN_PROGRESS, "cycle in DependencyQueue"); + return depth; + } + + results.insert(key.clone(), IN_PROGRESS); + + let depth = 1 + map + .get(&key) + .into_iter() + .flat_map(|it| it) + .map(|dep| depth(dep, map, results)) + .max() + .unwrap_or(0); + + *results.get_mut(key).unwrap() = depth; + + depth + } + } + + /// Dequeues a package that is ready to be built. + /// + /// A package is ready to be built when it has 0 un-built dependencies. If + /// `None` is returned then no packages are ready to be built. + pub fn dequeue(&mut self) -> Option<(Freshness, K, V)> { + // Look at all our crates and find everything that's ready to build (no + // deps). After we've got that candidate set select the one which has + // the maximum depth in the dependency graph. This way we should + // hopefully keep CPUs hottest the longest by ensuring that long + // dependency chains are scheduled early on in the build process and the + // leafs higher in the tree can fill in the cracks later. + // + // TODO: it'd be best here to throw in a heuristic of crate size as + // well. For example how long did this crate historically take to + // compile? How large is its source code? etc. + let next = self + .dep_map + .iter() + .filter(|&(_, &(ref deps, _))| deps.is_empty()) + .map(|(key, _)| key.clone()) + .max_by_key(|k| self.depth[k]); + let key = match next { + Some(key) => key, + None => return None, + }; + let (_, data) = self.dep_map.remove(&key).unwrap(); + let fresh = if self.dirty.contains(&key) { + Dirty + } else { + Fresh + }; + self.pending.insert(key.clone()); + Some((fresh, key, data)) + } + + /// Returns `true` if there are remaining packages to be built. + pub fn is_empty(&self) -> bool { + self.dep_map.is_empty() && self.pending.is_empty() + } + + /// Returns the number of remaining packages to be built. + pub fn len(&self) -> usize { + self.dep_map.len() + self.pending.len() + } + + /// Indicate that a package has been built. + /// + /// This function will update the dependency queue with this information, + /// possibly allowing the next invocation of `dequeue` to return a package. 
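+ ///
+ /// Note that finishing a key as `Dirty` also marks every package that
+ /// depends on it as dirty, so a rebuilt dependency forces its dependents
+ /// to rebuild in turn.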
+ pub fn finish(&mut self, key: &K, fresh: Freshness) { + assert!(self.pending.remove(key)); + let reverse_deps = match self.reverse_dep_map.get(key) { + Some(deps) => deps, + None => return, + }; + for dep in reverse_deps.iter() { + if fresh == Dirty { + self.dirty.insert(dep.clone()); + } + assert!(self.dep_map.get_mut(dep).unwrap().0.remove(key)); + } + } +} + +#[cfg(test)] +mod test { + use super::{DependencyQueue, Freshness}; + + #[test] + fn deep_first() { + let mut q = DependencyQueue::new(); + + q.queue(Freshness::Fresh, &1, (), &[]); + q.queue(Freshness::Fresh, &2, (), &[1]); + q.queue(Freshness::Fresh, &3, (), &[]); + q.queue(Freshness::Fresh, &4, (), &[2, 3]); + q.queue(Freshness::Fresh, &5, (), &[4, 3]); + q.queue_finished(); + + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 1, ()))); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 3, ()))); + assert_eq!(q.dequeue(), None); + q.finish(&3, Freshness::Fresh); + assert_eq!(q.dequeue(), None); + q.finish(&1, Freshness::Fresh); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 2, ()))); + assert_eq!(q.dequeue(), None); + q.finish(&2, Freshness::Fresh); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 4, ()))); + assert_eq!(q.dequeue(), None); + q.finish(&4, Freshness::Fresh); + assert_eq!(q.dequeue(), Some((Freshness::Fresh, 5, ()))); + } +} diff --git a/src/cargo/util/diagnostic_server.rs b/src/cargo/util/diagnostic_server.rs new file mode 100644 index 000000000..98ca15b60 --- /dev/null +++ b/src/cargo/util/diagnostic_server.rs @@ -0,0 +1,293 @@ +//! A small TCP server to handle collection of diagnostics information in a +//! cross-platform way for the `cargo fix` command. + +use std::collections::HashSet; +use std::env; +use std::io::{BufReader, Read, Write}; +use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread::{self, JoinHandle}; + +use failure::{Error, ResultExt}; +use log::warn; +use serde::{Deserialize, Serialize}; + +use crate::util::errors::CargoResult; +use crate::util::{Config, ProcessBuilder}; + +const DIAGNOSICS_SERVER_VAR: &str = "__CARGO_FIX_DIAGNOSTICS_SERVER"; +const PLEASE_REPORT_THIS_BUG: &str = + "\ + This likely indicates a bug in either rustc or cargo itself,\n\ + and we would appreciate a bug report! You're likely to see \n\ + a number of compiler warnings after this message which cargo\n\ + attempted to fix but failed. 
If you could open an issue at\n\ + https://github.com/rust-lang/rust/issues\n\ + quoting the full output of this command we'd be very appreciative!\n\ + Note that you may be able to make some more progress in the near-term\n\ + fixing code with the `--broken-code` flag\n\n\ + "; + +#[derive(Deserialize, Serialize)] +pub enum Message { + Fixing { + file: String, + fixes: u32, + }, + FixFailed { + files: Vec, + krate: Option, + errors: Vec, + }, + ReplaceFailed { + file: String, + message: String, + }, + EditionAlreadyEnabled { + file: String, + edition: String, + }, + IdiomEditionMismatch { + file: String, + idioms: String, + edition: Option, + }, +} + +impl Message { + pub fn post(&self) -> Result<(), Error> { + let addr = + env::var(DIAGNOSICS_SERVER_VAR).context("diagnostics collector misconfigured")?; + let mut client = + TcpStream::connect(&addr).context("failed to connect to parent diagnostics target")?; + + let s = serde_json::to_string(self).context("failed to serialize message")?; + client + .write_all(s.as_bytes()) + .context("failed to write message to diagnostics target")?; + client + .shutdown(Shutdown::Write) + .context("failed to shutdown")?; + + let mut tmp = Vec::new(); + client + .read_to_end(&mut tmp) + .context("failed to receive a disconnect")?; + + Ok(()) + } +} + +pub struct DiagnosticPrinter<'a> { + config: &'a Config, + edition_already_enabled: HashSet, + idiom_mismatch: HashSet, +} + +impl<'a> DiagnosticPrinter<'a> { + pub fn new(config: &'a Config) -> DiagnosticPrinter<'a> { + DiagnosticPrinter { + config, + edition_already_enabled: HashSet::new(), + idiom_mismatch: HashSet::new(), + } + } + + pub fn print(&mut self, msg: &Message) -> CargoResult<()> { + match msg { + Message::Fixing { file, fixes } => { + let msg = if *fixes == 1 { "fix" } else { "fixes" }; + let msg = format!("{} ({} {})", file, fixes, msg); + self.config.shell().status("Fixing", msg) + } + Message::ReplaceFailed { file, message } => { + let msg = format!("error applying suggestions to `{}`\n", file); + self.config.shell().warn(&msg)?; + write!( + self.config.shell().err(), + "The full error message was:\n\n> {}\n\n", + message, + )?; + write!(self.config.shell().err(), "{}", PLEASE_REPORT_THIS_BUG)?; + Ok(()) + } + Message::FixFailed { + files, + krate, + errors, + } => { + if let Some(ref krate) = *krate { + self.config.shell().warn(&format!( + "failed to automatically apply fixes suggested by rustc \ + to crate `{}`", + krate, + ))?; + } else { + self.config + .shell() + .warn("failed to automatically apply fixes suggested by rustc")?; + } + if !files.is_empty() { + writeln!( + self.config.shell().err(), + "\nafter fixes were automatically applied the compiler \ + reported errors within these files:\n" + )?; + for file in files { + writeln!(self.config.shell().err(), " * {}", file)?; + } + writeln!(self.config.shell().err())?; + } + write!(self.config.shell().err(), "{}", PLEASE_REPORT_THIS_BUG)?; + if !errors.is_empty() { + writeln!( + self.config.shell().err(), + "The following errors were reported:" + )?; + for error in errors { + write!(self.config.shell().err(), "{}", error)?; + if !error.ends_with('\n') { + writeln!(self.config.shell().err())?; + } + } + } + writeln!( + self.config.shell().err(), + "Original diagnostics will follow.\n" + )?; + Ok(()) + } + Message::EditionAlreadyEnabled { file, edition } => { + // Like above, only warn once per file + if !self.edition_already_enabled.insert(file.clone()) { + return Ok(()); + } + + let msg = format!( + "\ +cannot prepare for the {} 
edition when it is enabled, so cargo cannot +automatically fix errors in `{}` + +To prepare for the {0} edition you should first remove `edition = '{0}'` from +your `Cargo.toml` and then rerun this command. Once all warnings have been fixed +then you can re-enable the `edition` key in `Cargo.toml`. For some more +information about transitioning to the {0} edition see: + + https://rust-lang-nursery.github.io/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html +", + edition, + file, + ); + self.config.shell().error(&msg)?; + Ok(()) + } + Message::IdiomEditionMismatch { + file, + idioms, + edition, + } => { + // Same as above + if !self.idiom_mismatch.insert(file.clone()) { + return Ok(()); + } + self.config.shell().error(&format!( + "\ +cannot migrate to the idioms of the {} edition for `{}` +because it is compiled {}, which doesn't match {0} + +consider migrating to the {0} edition by adding `edition = '{0}'` to +`Cargo.toml` and then rerunning this command; a more detailed transition +guide can be found at + + https://rust-lang-nursery.github.io/edition-guide/editions/transitioning-an-existing-project-to-a-new-edition.html +", + idioms, + file, + match edition { + Some(s) => format!("with the {} edition", s), + None => "without an edition".to_string(), + }, + ))?; + Ok(()) + } + } + } +} + +#[derive(Debug)] +pub struct RustfixDiagnosticServer { + listener: TcpListener, + addr: SocketAddr, +} + +pub struct StartedServer { + addr: SocketAddr, + done: Arc, + thread: Option>, +} + +impl RustfixDiagnosticServer { + pub fn new() -> Result { + let listener = TcpListener::bind("127.0.0.1:0") + .with_context(|_| "failed to bind TCP listener to manage locking")?; + let addr = listener.local_addr()?; + + Ok(RustfixDiagnosticServer { listener, addr }) + } + + pub fn configure(&self, process: &mut ProcessBuilder) { + process.env(DIAGNOSICS_SERVER_VAR, self.addr.to_string()); + } + + pub fn start(self, on_message: F) -> Result + where + F: Fn(Message) + Send + 'static, + { + let addr = self.addr; + let done = Arc::new(AtomicBool::new(false)); + let done2 = done.clone(); + let thread = thread::spawn(move || { + self.run(&on_message, &done2); + }); + + Ok(StartedServer { + addr, + thread: Some(thread), + done, + }) + } + + fn run(self, on_message: &dyn Fn(Message), done: &AtomicBool) { + while let Ok((client, _)) = self.listener.accept() { + if done.load(Ordering::SeqCst) { + break; + } + let mut client = BufReader::new(client); + let mut s = String::new(); + if let Err(e) = client.read_to_string(&mut s) { + warn!("diagnostic server failed to read: {}", e); + } else { + match serde_json::from_str(&s) { + Ok(message) => on_message(message), + Err(e) => warn!("invalid diagnostics message: {}", e), + } + } + // The client should be kept alive until after `on_message` is + // called to ensure that the client doesn't exit too soon (and + // Message::Finish getting posted before Message::FixDiagnostic). 
+ drop(client); + } + } +} + +impl Drop for StartedServer { + fn drop(&mut self) { + self.done.store(true, Ordering::SeqCst); + // Ignore errors here as this is largely best-effort + if TcpStream::connect(&self.addr).is_err() { + return; + } + drop(self.thread.take().unwrap().join()); + } +} diff --git a/src/cargo/util/errors.rs b/src/cargo/util/errors.rs new file mode 100644 index 000000000..a3f071c9f --- /dev/null +++ b/src/cargo/util/errors.rs @@ -0,0 +1,389 @@ +#![allow(unknown_lints)] + +use std::fmt; +use std::path::PathBuf; +use std::process::{ExitStatus, Output}; +use std::str; + +use clap; +use failure::{Context, Error, Fail}; +use log::trace; + +use crate::core::{TargetKind, Workspace}; + +pub type CargoResult = failure::Fallible; // Alex's body isn't quite ready to give up "Result" + +pub trait CargoResultExt { + fn chain_err(self, f: F) -> Result> + where + F: FnOnce() -> D, + D: fmt::Display + Send + Sync + 'static; +} + +impl CargoResultExt for Result +where + E: Into, +{ + fn chain_err(self, f: F) -> Result> + where + F: FnOnce() -> D, + D: fmt::Display + Send + Sync + 'static, + { + self.map_err(|failure| { + let err = failure.into(); + let context = f(); + trace!("error: {}", err); + trace!("\tcontext: {}", context); + err.context(context) + }) + } +} + +#[derive(Debug, Fail)] +#[fail(display = "failed to get 200 response from `{}`, got {}", url, code)] +pub struct HttpNot200 { + pub code: u32, + pub url: String, +} + +pub struct Internal { + inner: Error, +} + +impl Internal { + pub fn new(inner: Error) -> Internal { + Internal { inner } + } +} + +impl Fail for Internal { + fn cause(&self) -> Option<&dyn Fail> { + self.inner.as_fail().cause() + } +} + +impl fmt::Debug for Internal { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +impl fmt::Display for Internal { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.inner.fmt(f) + } +} + +/// Error wrapper related to a particular manifest and providing it's path. +/// +/// This error adds no displayable info of it's own. +pub struct ManifestError { + cause: Error, + manifest: PathBuf, +} + +impl ManifestError { + pub fn new>(cause: E, manifest: PathBuf) -> Self { + Self { + cause: cause.into(), + manifest, + } + } + + pub fn manifest_path(&self) -> &PathBuf { + &self.manifest + } + + /// Returns an iterator over the `ManifestError` chain of causes. + /// + /// So if this error was not caused by another `ManifestError` this will be empty. + pub fn manifest_causes(&self) -> ManifestCauses<'_> { + ManifestCauses { current: self } + } +} + +impl Fail for ManifestError { + fn cause(&self) -> Option<&dyn Fail> { + self.cause.as_fail().cause() + } +} + +impl fmt::Debug for ManifestError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.cause.fmt(f) + } +} + +impl fmt::Display for ManifestError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.cause.fmt(f) + } +} + +/// An iterator over the `ManifestError` chain of causes. 
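+///
+/// A hedged sketch (variable names invented) that reports every manifest
+/// involved in a nested failure:
+///
+///     for err in manifest_err.manifest_causes() {
+///         eprintln!("  in manifest `{}`", err.manifest_path().display());
+///     }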
+pub struct ManifestCauses<'a> { + current: &'a ManifestError, +} + +impl<'a> Iterator for ManifestCauses<'a> { + type Item = &'a ManifestError; + + fn next(&mut self) -> Option { + self.current = self.current.cause.downcast_ref()?; + Some(self.current) + } +} + +impl<'a> ::std::iter::FusedIterator for ManifestCauses<'a> {} + +// ============================================================================= +// Process errors +#[derive(Debug, Fail)] +#[fail(display = "{}", desc)] +pub struct ProcessError { + pub desc: String, + pub exit: Option, + pub output: Option, +} + +// ============================================================================= +// Cargo test errors. + +/// Error when testcases fail +#[derive(Debug, Fail)] +#[fail(display = "{}", desc)] +pub struct CargoTestError { + pub test: Test, + pub desc: String, + pub exit: Option, + pub causes: Vec, +} + +#[derive(Debug)] +pub enum Test { + Multiple, + Doc, + UnitTest { + kind: TargetKind, + name: String, + pkg_name: String, + }, +} + +impl CargoTestError { + pub fn new(test: Test, errors: Vec) -> Self { + if errors.is_empty() { + panic!("Cannot create CargoTestError from empty Vec") + } + let desc = errors + .iter() + .map(|error| error.desc.clone()) + .collect::>() + .join("\n"); + CargoTestError { + test, + desc, + exit: errors[0].exit, + causes: errors, + } + } + + pub fn hint(&self, ws: &Workspace<'_>) -> String { + match self.test { + Test::UnitTest { + ref kind, + ref name, + ref pkg_name, + } => { + let pkg_info = if ws.members().count() > 1 && ws.is_virtual() { + format!("-p {} ", pkg_name) + } else { + String::new() + }; + + match *kind { + TargetKind::Bench => { + format!("test failed, to rerun pass '{}--bench {}'", pkg_info, name) + } + TargetKind::Bin => { + format!("test failed, to rerun pass '{}--bin {}'", pkg_info, name) + } + TargetKind::Lib(_) => format!("test failed, to rerun pass '{}--lib'", pkg_info), + TargetKind::Test => { + format!("test failed, to rerun pass '{}--test {}'", pkg_info, name) + } + TargetKind::ExampleBin | TargetKind::ExampleLib(_) => { + format!("test failed, to rerun pass '{}--example {}", pkg_info, name) + } + _ => "test failed.".into(), + } + } + Test::Doc => "test failed, to rerun pass '--doc'".into(), + _ => "test failed.".into(), + } + } +} + +// ============================================================================= +// CLI errors + +pub type CliResult = Result<(), CliError>; + +#[derive(Debug)] +pub struct CliError { + pub error: Option, + pub unknown: bool, + pub exit_code: i32, +} + +impl CliError { + pub fn new(error: failure::Error, code: i32) -> CliError { + let unknown = error.downcast_ref::().is_some(); + CliError { + error: Some(error), + exit_code: code, + unknown, + } + } + + pub fn code(code: i32) -> CliError { + CliError { + error: None, + exit_code: code, + unknown: false, + } + } +} + +impl From for CliError { + fn from(err: failure::Error) -> CliError { + CliError::new(err, 101) + } +} + +impl From for CliError { + fn from(err: clap::Error) -> CliError { + let code = if err.use_stderr() { 1 } else { 0 }; + CliError::new(err.into(), code) + } +} + +// ============================================================================= +// Construction helpers + +pub fn process_error( + msg: &str, + status: Option, + output: Option<&Output>, +) -> ProcessError { + let exit = match status { + Some(s) => status_to_string(s), + None => "never executed".to_string(), + }; + let mut desc = format!("{} ({})", &msg, exit); + + if let Some(out) = output { + match 
str::from_utf8(&out.stdout) { + Ok(s) if !s.trim().is_empty() => { + desc.push_str("\n--- stdout\n"); + desc.push_str(s); + } + Ok(..) | Err(..) => {} + } + match str::from_utf8(&out.stderr) { + Ok(s) if !s.trim().is_empty() => { + desc.push_str("\n--- stderr\n"); + desc.push_str(s); + } + Ok(..) | Err(..) => {} + } + } + + return ProcessError { + desc, + exit: status, + output: output.cloned(), + }; + + #[cfg(unix)] + fn status_to_string(status: ExitStatus) -> String { + use std::os::unix::process::*; + + if let Some(signal) = status.signal() { + let name = match signal as libc::c_int { + libc::SIGABRT => ", SIGABRT: process abort signal", + libc::SIGALRM => ", SIGALRM: alarm clock", + libc::SIGFPE => ", SIGFPE: erroneous arithmetic operation", + libc::SIGHUP => ", SIGHUP: hangup", + libc::SIGILL => ", SIGILL: illegal instruction", + libc::SIGINT => ", SIGINT: terminal interrupt signal", + libc::SIGKILL => ", SIGKILL: kill", + libc::SIGPIPE => ", SIGPIPE: write on a pipe with no one to read", + libc::SIGQUIT => ", SIGQUIT: terminal quite signal", + libc::SIGSEGV => ", SIGSEGV: invalid memory reference", + libc::SIGTERM => ", SIGTERM: termination signal", + libc::SIGBUS => ", SIGBUS: access to undefined memory", + #[cfg(not(target_os = "haiku"))] + libc::SIGSYS => ", SIGSYS: bad system call", + libc::SIGTRAP => ", SIGTRAP: trace/breakpoint trap", + _ => "", + }; + format!("signal: {}{}", signal, name) + } else { + status.to_string() + } + } + + #[cfg(windows)] + fn status_to_string(status: ExitStatus) -> String { + use winapi::shared::minwindef::DWORD; + use winapi::um::winnt::*; + + let mut base = status.to_string(); + let extra = match status.code().unwrap() as DWORD { + STATUS_ACCESS_VIOLATION => "STATUS_ACCESS_VIOLATION", + STATUS_IN_PAGE_ERROR => "STATUS_IN_PAGE_ERROR", + STATUS_INVALID_HANDLE => "STATUS_INVALID_HANDLE", + STATUS_INVALID_PARAMETER => "STATUS_INVALID_PARAMETER", + STATUS_NO_MEMORY => "STATUS_NO_MEMORY", + STATUS_ILLEGAL_INSTRUCTION => "STATUS_ILLEGAL_INSTRUCTION", + STATUS_NONCONTINUABLE_EXCEPTION => "STATUS_NONCONTINUABLE_EXCEPTION", + STATUS_INVALID_DISPOSITION => "STATUS_INVALID_DISPOSITION", + STATUS_ARRAY_BOUNDS_EXCEEDED => "STATUS_ARRAY_BOUNDS_EXCEEDED", + STATUS_FLOAT_DENORMAL_OPERAND => "STATUS_FLOAT_DENORMAL_OPERAND", + STATUS_FLOAT_DIVIDE_BY_ZERO => "STATUS_FLOAT_DIVIDE_BY_ZERO", + STATUS_FLOAT_INEXACT_RESULT => "STATUS_FLOAT_INEXACT_RESULT", + STATUS_FLOAT_INVALID_OPERATION => "STATUS_FLOAT_INVALID_OPERATION", + STATUS_FLOAT_OVERFLOW => "STATUS_FLOAT_OVERFLOW", + STATUS_FLOAT_STACK_CHECK => "STATUS_FLOAT_STACK_CHECK", + STATUS_FLOAT_UNDERFLOW => "STATUS_FLOAT_UNDERFLOW", + STATUS_INTEGER_DIVIDE_BY_ZERO => "STATUS_INTEGER_DIVIDE_BY_ZERO", + STATUS_INTEGER_OVERFLOW => "STATUS_INTEGER_OVERFLOW", + STATUS_PRIVILEGED_INSTRUCTION => "STATUS_PRIVILEGED_INSTRUCTION", + STATUS_STACK_OVERFLOW => "STATUS_STACK_OVERFLOW", + STATUS_DLL_NOT_FOUND => "STATUS_DLL_NOT_FOUND", + STATUS_ORDINAL_NOT_FOUND => "STATUS_ORDINAL_NOT_FOUND", + STATUS_ENTRYPOINT_NOT_FOUND => "STATUS_ENTRYPOINT_NOT_FOUND", + STATUS_CONTROL_C_EXIT => "STATUS_CONTROL_C_EXIT", + STATUS_DLL_INIT_FAILED => "STATUS_DLL_INIT_FAILED", + STATUS_FLOAT_MULTIPLE_FAULTS => "STATUS_FLOAT_MULTIPLE_FAULTS", + STATUS_FLOAT_MULTIPLE_TRAPS => "STATUS_FLOAT_MULTIPLE_TRAPS", + STATUS_REG_NAT_CONSUMPTION => "STATUS_REG_NAT_CONSUMPTION", + STATUS_HEAP_CORRUPTION => "STATUS_HEAP_CORRUPTION", + STATUS_STACK_BUFFER_OVERRUN => "STATUS_STACK_BUFFER_OVERRUN", + STATUS_ASSERTION_FAILURE => "STATUS_ASSERTION_FAILURE", + _ => return 
base, + }; + base.push_str(", "); + base.push_str(extra); + base + } +} + +pub fn internal(error: S) -> failure::Error { + _internal(&error) +} + +fn _internal(error: &dyn fmt::Display) -> failure::Error { + Internal::new(failure::format_err!("{}", error)).into() +} diff --git a/src/cargo/util/flock.rs b/src/cargo/util/flock.rs new file mode 100644 index 000000000..bc87be6c9 --- /dev/null +++ b/src/cargo/util/flock.rs @@ -0,0 +1,341 @@ +use std::fs::{self, File, OpenOptions}; +use std::io; +use std::io::{Read, Seek, SeekFrom, Write}; +use std::path::{Display, Path, PathBuf}; + +use fs2::{lock_contended_error, FileExt}; +#[allow(unused_imports)] +use libc; +use termcolor::Color::Cyan; + +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::paths; +use crate::util::Config; + +pub struct FileLock { + f: Option, + path: PathBuf, + state: State, +} + +#[derive(PartialEq, Debug)] +enum State { + Unlocked, + Shared, + Exclusive, +} + +impl FileLock { + /// Returns the underlying file handle of this lock. + pub fn file(&self) -> &File { + self.f.as_ref().unwrap() + } + + /// Returns the underlying path that this lock points to. + /// + /// Note that special care must be taken to ensure that the path is not + /// referenced outside the lifetime of this lock. + pub fn path(&self) -> &Path { + assert_ne!(self.state, State::Unlocked); + &self.path + } + + /// Returns the parent path containing this file + pub fn parent(&self) -> &Path { + assert_ne!(self.state, State::Unlocked); + self.path.parent().unwrap() + } + + /// Removes all sibling files to this locked file. + /// + /// This can be useful if a directory is locked with a sentinel file but it + /// needs to be cleared out as it may be corrupt. + pub fn remove_siblings(&self) -> CargoResult<()> { + let path = self.path(); + for entry in path.parent().unwrap().read_dir()? { + let entry = entry?; + if Some(&entry.file_name()[..]) == path.file_name() { + continue; + } + let kind = entry.file_type()?; + if kind.is_dir() { + paths::remove_dir_all(entry.path())?; + } else { + paths::remove_file(entry.path())?; + } + } + Ok(()) + } +} + +impl Read for FileLock { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.file().read(buf) + } +} + +impl Seek for FileLock { + fn seek(&mut self, to: SeekFrom) -> io::Result { + self.file().seek(to) + } +} + +impl Write for FileLock { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.file().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.file().flush() + } +} + +impl Drop for FileLock { + fn drop(&mut self) { + if self.state != State::Unlocked { + if let Some(f) = self.f.take() { + let _ = f.unlock(); + } + } + } +} + +/// A "filesystem" is intended to be a globally shared, hence locked, resource +/// in Cargo. +/// +/// The `Path` of a filesystem cannot be learned unless it's done in a locked +/// fashion, and otherwise functions on this structure are prepared to handle +/// concurrent invocations across multiple instances of Cargo. +#[derive(Clone, Debug)] +pub struct Filesystem { + root: PathBuf, +} + +impl Filesystem { + /// Creates a new filesystem to be rooted at the given path. + pub fn new(path: PathBuf) -> Filesystem { + Filesystem { root: path } + } + + /// Like `Path::join`, creates a new filesystem rooted at this filesystem + /// joined with the given path. + pub fn join>(&self, other: T) -> Filesystem { + Filesystem::new(self.root.join(other)) + } + + /// Like `Path::push`, pushes a new path component onto this filesystem. 
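+ ///
+ /// A small usage sketch (path invented for illustration):
+ ///
+ ///     let mut fs = Filesystem::new(PathBuf::from("/cargo/home"));
+ ///     fs.push("registry"); // now rooted at /cargo/home/registry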
+ pub fn push<T: AsRef<Path>>(&mut self, other: T) { + self.root.push(other); + } + + /// Consumes this filesystem and returns the underlying `PathBuf`. + /// + /// Note that this is a relatively dangerous operation and should be used + /// with great caution! + pub fn into_path_unlocked(self) -> PathBuf { + self.root + } + + /// Creates the directory pointed to by this filesystem. + /// + /// Handles errors where other Cargo processes are also attempting to + /// concurrently create this directory. + pub fn create_dir(&self) -> io::Result<()> { + fs::create_dir_all(&self.root) + } + + /// Returns an adaptor that can be used to print the path of this + /// filesystem. + pub fn display(&self) -> Display<'_> { + self.root.display() + } + + /// Opens exclusive access to a file, returning the locked version of a + /// file. + /// + /// This function will create a file at `path` if it doesn't already exist + /// (including intermediate directories), and then it will acquire an + /// exclusive lock on `path`. If the process must block waiting for the + /// lock, the `msg` is printed to `config`. + /// + /// The returned file can be accessed to look at the path and also has + /// read/write access to the underlying file. + pub fn open_rw
<P>
(&self, path: P, config: &Config, msg: &str) -> CargoResult + where + P: AsRef, + { + self.open( + path.as_ref(), + OpenOptions::new().read(true).write(true).create(true), + State::Exclusive, + config, + msg, + ) + } + + /// Opens shared access to a file, returning the locked version of a file. + /// + /// This function will fail if `path` doesn't already exist, but if it does + /// then it will acquire a shared lock on `path`. If the process must block + /// waiting for the lock, the `msg` is printed to `config`. + /// + /// The returned file can be accessed to look at the path and also has read + /// access to the underlying file. Any writes to the file will return an + /// error. + pub fn open_ro
<P>
(&self, path: P, config: &Config, msg: &str) -> CargoResult + where + P: AsRef, + { + self.open( + path.as_ref(), + OpenOptions::new().read(true), + State::Shared, + config, + msg, + ) + } + + fn open( + &self, + path: &Path, + opts: &OpenOptions, + state: State, + config: &Config, + msg: &str, + ) -> CargoResult { + let path = self.root.join(path); + + // If we want an exclusive lock then if we fail because of NotFound it's + // likely because an intermediate directory didn't exist, so try to + // create the directory and then continue. + let f = opts + .open(&path) + .or_else(|e| { + if e.kind() == io::ErrorKind::NotFound && state == State::Exclusive { + fs::create_dir_all(path.parent().unwrap())?; + opts.open(&path) + } else { + Err(e) + } + }) + .chain_err(|| format!("failed to open: {}", path.display()))?; + match state { + State::Exclusive => { + acquire(config, msg, &path, &|| f.try_lock_exclusive(), &|| { + f.lock_exclusive() + })?; + } + State::Shared => { + acquire(config, msg, &path, &|| f.try_lock_shared(), &|| { + f.lock_shared() + })?; + } + State::Unlocked => {} + } + Ok(FileLock { + f: Some(f), + path, + state, + }) + } +} + +impl PartialEq for Filesystem { + fn eq(&self, other: &Path) -> bool { + self.root == other + } +} + +impl PartialEq for Path { + fn eq(&self, other: &Filesystem) -> bool { + self == other.root + } +} + +/// Acquires a lock on a file in a "nice" manner. +/// +/// Almost all long-running blocking actions in Cargo have a status message +/// associated with them as we're not sure how long they'll take. Whenever a +/// conflicted file lock happens, this is the case (we're not sure when the lock +/// will be released). +/// +/// This function will acquire the lock on a `path`, printing out a nice message +/// to the console if we have to wait for it. It will first attempt to use `try` +/// to acquire a lock on the crate, and in the case of contention it will emit a +/// status message based on `msg` to `config`'s shell, and then use `block` to +/// block waiting to acquire a lock. +/// +/// Returns an error if the lock could not be acquired or if any error other +/// than a contention error happens. +fn acquire( + config: &Config, + msg: &str, + path: &Path, + r#try: &dyn Fn() -> io::Result<()>, + block: &dyn Fn() -> io::Result<()>, +) -> CargoResult<()> { + // File locking on Unix is currently implemented via `flock`, which is known + // to be broken on NFS. We could in theory just ignore errors that happen on + // NFS, but apparently the failure mode [1] for `flock` on NFS is **blocking + // forever**, even if the "non-blocking" flag is passed! + // + // As a result, we just skip all file locks entirely on NFS mounts. That + // should avoid calling any `flock` functions at all, and it wouldn't work + // there anyway. + // + // [1]: https://github.com/rust-lang/cargo/issues/2615 + if is_on_nfs_mount(path) { + return Ok(()); + } + + match r#try() { + Ok(()) => return Ok(()), + + // In addition to ignoring NFS which is commonly not working we also + // just ignore locking on filesystems that look like they don't + // implement file locking. We detect that here via the return value of + // locking (e.g., inspecting errno). 
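// As a usage sketch of the locking API above (illustrative only; the root
// and file name here are hypothetical, and a `Config` is assumed to be in
// scope):
//
//     fn write_under_lock(config: &Config) -> CargoResult<()> {
//         let fs = Filesystem::new(PathBuf::from("/tmp/example-root"));
//         let mut lock = fs.open_rw("state.lock", config, "example state")?;
//         lock.write_all(b"only one Cargo can be here at a time")?;
//         Ok(())
//     } // dropping `lock` releases the file lock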
+ #[cfg(unix)] + Err(ref e) if e.raw_os_error() == Some(libc::ENOTSUP) => return Ok(()), + + #[cfg(target_os = "linux")] + Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => return Ok(()), + + Err(e) => { + if e.raw_os_error() != lock_contended_error().raw_os_error() { + let e = failure::Error::from(e); + let cx = format!("failed to lock file: {}", path.display()); + return Err(e.context(cx).into()); + } + } + } + let msg = format!("waiting for file lock on {}", msg); + config.shell().status_with_color("Blocking", &msg, Cyan)?; + + block().chain_err(|| format!("failed to lock file: {}", path.display()))?; + return Ok(()); + + #[cfg(all(target_os = "linux", not(target_env = "musl")))] + fn is_on_nfs_mount(path: &Path) -> bool { + use std::ffi::CString; + use std::mem; + use std::os::unix::prelude::*; + + let path = match CString::new(path.as_os_str().as_bytes()) { + Ok(path) => path, + Err(_) => return false, + }; + + unsafe { + let mut buf: libc::statfs = mem::zeroed(); + let r = libc::statfs(path.as_ptr(), &mut buf); + + r == 0 && buf.f_type as u32 == libc::NFS_SUPER_MAGIC as u32 + } + } + + #[cfg(any(not(target_os = "linux"), target_env = "musl"))] + fn is_on_nfs_mount(_path: &Path) -> bool { + false + } +} diff --git a/src/cargo/util/graph.rs b/src/cargo/util/graph.rs new file mode 100644 index 000000000..48fc0a227 --- /dev/null +++ b/src/cargo/util/graph.rs @@ -0,0 +1,134 @@ +use std::borrow::Borrow; +use std::collections::{HashMap, HashSet}; +use std::fmt; +use std::hash::Hash; + +pub struct Graph { + nodes: HashMap>, +} + +impl Graph { + pub fn new() -> Graph { + Graph { + nodes: HashMap::new(), + } + } + + pub fn add(&mut self, node: N) { + self.nodes.entry(node).or_insert_with(HashMap::new); + } + + pub fn link(&mut self, node: N, child: N) -> &mut E { + self.nodes + .entry(node) + .or_insert_with(HashMap::new) + .entry(child) + .or_insert_with(Default::default) + } + + pub fn contains(&self, k: &Q) -> bool + where + N: Borrow, + Q: Hash + Eq, + { + self.nodes.contains_key(k) + } + + pub fn edge(&self, from: &N, to: &N) -> Option<&E> { + self.nodes.get(from)?.get(to) + } + + pub fn edges(&self, from: &N) -> impl Iterator { + self.nodes.get(from).into_iter().flat_map(|x| x.iter()) + } + + /// A topological sort of the `Graph` + pub fn sort(&self) -> Vec { + let mut ret = Vec::new(); + let mut marks = HashSet::new(); + + for node in self.nodes.keys() { + self.sort_inner_visit(node, &mut ret, &mut marks); + } + + ret + } + + fn sort_inner_visit(&self, node: &N, dst: &mut Vec, marks: &mut HashSet) { + if !marks.insert(node.clone()) { + return; + } + + for child in self.nodes[node].keys() { + self.sort_inner_visit(child, dst, marks); + } + + dst.push(node.clone()); + } + + pub fn iter(&self) -> impl Iterator { + self.nodes.keys() + } + + /// Resolves one of the paths from the given dependent package up to + /// the root. + pub fn path_to_top<'a>(&'a self, mut pkg: &'a N) -> Vec<&'a N> { + // Note that this implementation isn't the most robust per se, we'll + // likely have to tweak this over time. For now though it works for what + // it's used for! + let mut result = vec![pkg]; + let first_pkg_depending_on = |pkg: &N, res: &[&N]| { + self.nodes + .iter() + .filter(|&(_, adjacent)| adjacent.contains_key(pkg)) + // Note that we can have "cycles" introduced through dev-dependency + // edges, so make sure we don't loop infinitely. 
+ .find(|&(node, _)| !res.contains(&node)) + .map(|p| p.0) + }; + while let Some(p) = first_pkg_depending_on(pkg, &result) { + result.push(p); + pkg = p; + } + result + } +} + +impl Default for Graph { + fn default() -> Graph { + Graph::new() + } +} + +impl fmt::Debug for Graph { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(fmt, "Graph {{")?; + + for (n, e) in &self.nodes { + writeln!(fmt, " - {}", n)?; + + for n in e.keys() { + writeln!(fmt, " - {}", n)?; + } + } + + write!(fmt, "}}")?; + + Ok(()) + } +} + +impl PartialEq for Graph { + fn eq(&self, other: &Graph) -> bool { + self.nodes.eq(&other.nodes) + } +} +impl Eq for Graph {} + +impl Clone for Graph { + fn clone(&self) -> Graph { + Graph { + nodes: self.nodes.clone(), + } + } +} diff --git a/src/cargo/util/hex.rs b/src/cargo/util/hex.rs new file mode 100644 index 000000000..7e4dd00e9 --- /dev/null +++ b/src/cargo/util/hex.rs @@ -0,0 +1,27 @@ +#![allow(deprecated)] + +use hex; +use std::hash::{Hash, Hasher, SipHasher}; + +pub fn to_hex(num: u64) -> String { + hex::encode(&[ + (num >> 0) as u8, + (num >> 8) as u8, + (num >> 16) as u8, + (num >> 24) as u8, + (num >> 32) as u8, + (num >> 40) as u8, + (num >> 48) as u8, + (num >> 56) as u8, + ]) +} + +pub fn hash_u64<H: Hash>(hashable: &H) -> u64 { + let mut hasher = SipHasher::new_with_keys(0, 0); + hashable.hash(&mut hasher); + hasher.finish() +} + +pub fn short_hash<H: Hash>(hashable: &H) -> String { + to_hex(hash_u64(hashable)) +} diff --git a/src/cargo/util/important_paths.rs b/src/cargo/util/important_paths.rs new file mode 100644 index 000000000..6476c3e19 --- /dev/null +++ b/src/cargo/util/important_paths.rs @@ -0,0 +1,32 @@ +use crate::util::errors::CargoResult; +use crate::util::paths; +use std::fs; +use std::path::{Path, PathBuf}; + +/// Finds the root `Cargo.toml`. +pub fn find_root_manifest_for_wd(cwd: &Path) -> CargoResult<PathBuf> { + let file = "Cargo.toml"; + for current in paths::ancestors(cwd) { + let manifest = current.join(file); + if fs::metadata(&manifest).is_ok() { + return Ok(manifest); + } + } + + failure::bail!( + "could not find `{}` in `{}` or any parent directory", + file, + cwd.display() + ) +} + +/// Returns the path to the `file` in `pwd`, if it exists. +pub fn find_project_manifest_exact(pwd: &Path, file: &str) -> CargoResult<PathBuf> { + let manifest = pwd.join(file); + + if manifest.exists() { + Ok(manifest) + } else { + failure::bail!("could not find `{}` in `{}`", file, pwd.display()) + } +} diff --git a/src/cargo/util/job.rs b/src/cargo/util/job.rs new file mode 100644 index 000000000..27b3b953f --- /dev/null +++ b/src/cargo/util/job.rs @@ -0,0 +1,142 @@ +//! Job management (mostly for Windows) +//! +//! Most of the time when you're running cargo you expect Ctrl-C to actually +//! terminate the entire tree of processes in play, not just the one at the top +//! (cargo). This currently works "by default" on Unix platforms because Ctrl-C +//! actually sends a signal to the *process group* rather than the parent +//! process, so everything will get torn down. On Windows, however, this does +//! not happen and Ctrl-C just kills cargo. +//! +//! To achieve the same semantics on Windows we use Job Objects to ensure that +//! all processes die at the same time. Job objects have a mode of operation +//! where, when all handles to the object are closed, all child +//! processes associated with the object are terminated immediately. +//! Conveniently, whenever a process in the job object spawns a new process, the
child will be associated with the job object as well. This means if we add +//! ourselves to the job object we create then everything will get torn down! + +pub use self::imp::Setup; + +pub fn setup() -> Option { + unsafe { imp::setup() } +} + +#[cfg(unix)] +mod imp { + use libc; + use std::env; + + pub type Setup = (); + + pub unsafe fn setup() -> Option<()> { + // There's a test case for the behavior of + // when-cargo-is-killed-subprocesses-are-also-killed, but that requires + // one cargo spawned to become its own session leader, so we do that + // here. + if env::var("__CARGO_TEST_SETSID_PLEASE_DONT_USE_ELSEWHERE").is_ok() { + libc::setsid(); + } + Some(()) + } +} + +#[cfg(windows)] +mod imp { + use std::io; + use std::mem; + use std::ptr; + + use log::info; + + use winapi::shared::minwindef::*; + use winapi::um::handleapi::*; + use winapi::um::jobapi2::*; + use winapi::um::processthreadsapi::*; + use winapi::um::winnt::HANDLE; + use winapi::um::winnt::*; + + pub struct Setup { + job: Handle, + } + + pub struct Handle { + inner: HANDLE, + } + + fn last_err() -> io::Error { + io::Error::last_os_error() + } + + pub unsafe fn setup() -> Option { + // Creates a new job object for us to use and then adds ourselves to it. + // Note that all errors are basically ignored in this function, + // intentionally. Job objects are "relatively new" in Windows, + // particularly the ability to support nested job objects. Older + // Windows installs don't support this ability. We probably don't want + // to force Cargo to abort in this situation or force others to *not* + // use job objects, so we instead just ignore errors and assume that + // we're otherwise part of someone else's job object in this case. + + let job = CreateJobObjectW(ptr::null_mut(), ptr::null()); + if job.is_null() { + return None; + } + let job = Handle { inner: job }; + + // Indicate that when all handles to the job object are gone that all + // process in the object should be killed. Note that this includes our + // entire process tree by default because we've added ourselves and + // our children will reside in the job once we spawn a process. + let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION; + info = mem::zeroed(); + info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + let r = SetInformationJobObject( + job.inner, + JobObjectExtendedLimitInformation, + &mut info as *mut _ as LPVOID, + mem::size_of_val(&info) as DWORD, + ); + if r == 0 { + return None; + } + + // Assign our process to this job object, meaning that our children will + // now live or die based on our existence. + let me = GetCurrentProcess(); + let r = AssignProcessToJobObject(job.inner, me); + if r == 0 { + return None; + } + + Some(Setup { job }) + } + + impl Drop for Setup { + fn drop(&mut self) { + // On normal exits (not ctrl-c), we don't want to kill any child + // processes. The destructor here configures our job object to + // **not** kill everything on close, then closes the job object. 
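// Caller-side sketch for this module (assumed usage): `setup` is invoked
// once near process startup and the returned guard is kept alive for the
// life of the process, so the job object handle is only closed when cargo
// itself goes away.
//
//     fn main() {
//         let _job_guard = cargo::util::job::setup();
//         // ... the rest of cargo runs with the job object in place ...
//     }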
+ unsafe { + let mut info: JOBOBJECT_EXTENDED_LIMIT_INFORMATION; + info = mem::zeroed(); + let r = SetInformationJobObject( + self.job.inner, + JobObjectExtendedLimitInformation, + &mut info as *mut _ as LPVOID, + mem::size_of_val(&info) as DWORD, + ); + if r == 0 { + info!("failed to configure job object to defaults: {}", last_err()); + } + } + } + } + + impl Drop for Handle { + fn drop(&mut self) { + unsafe { + CloseHandle(self.inner); + } + } + } +} diff --git a/src/cargo/util/lev_distance.rs b/src/cargo/util/lev_distance.rs new file mode 100644 index 000000000..034fb7287 --- /dev/null +++ b/src/cargo/util/lev_distance.rs @@ -0,0 +1,56 @@ +use std::cmp; + +pub fn lev_distance(me: &str, t: &str) -> usize { + if me.is_empty() { + return t.chars().count(); + } + if t.is_empty() { + return me.chars().count(); + } + + let mut dcol = (0..=t.len()).collect::<Vec<usize>>(); + let mut t_last = 0; + + for (i, sc) in me.chars().enumerate() { + let mut current = i; + dcol[0] = current + 1; + + for (j, tc) in t.chars().enumerate() { + let next = dcol[j + 1]; + + if sc == tc { + dcol[j + 1] = current; + } else { + dcol[j + 1] = cmp::min(current, next); + dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1; + } + + current = next; + t_last = j; + } + } + + dcol[t_last + 1] +} + +#[test] +fn test_lev_distance() { + use std::char::{from_u32, MAX}; + // Test byte-length agnosticism + for c in (0u32..MAX as u32) + .filter_map(from_u32) + .map(|i| i.to_string()) + { + assert_eq!(lev_distance(&c, &c), 0); + } + + let a = "\nMäry häd ä little lämb\n\nLittle lämb\n"; + let b = "\nMary häd ä little lämb\n\nLittle lämb\n"; + let c = "Mary häd ä little lämb\n\nLittle lämb\n"; + assert_eq!(lev_distance(a, b), 1); + assert_eq!(lev_distance(b, a), 1); + assert_eq!(lev_distance(a, c), 2); + assert_eq!(lev_distance(c, a), 2); + assert_eq!(lev_distance(b, c), 1); + assert_eq!(lev_distance(c, b), 1); +} diff --git a/src/cargo/util/lockserver.rs b/src/cargo/util/lockserver.rs new file mode 100644 index 000000000..9c4878dfc --- /dev/null +++ b/src/cargo/util/lockserver.rs @@ -0,0 +1,171 @@ +//! An implementation of IPC locks, guaranteed to be released if a process dies +//! +//! This module implements a locking server/client where the main `cargo fix` +//! process will start up a server and then all the client processes will +//! connect to it. The main purpose of this file is to ensure that each crate +//! (aka file entry point) is only fixed by one process at a time; concurrent +//! fixes of the same crate currently can't happen. +//! +//! The basic design here is to use a TCP server which is pretty portable across +//! platforms. For simplicity it just uses threads as well. Clients connect to +//! the main server, inform the server what their name is, and then wait for the +//! server to give them the lock (aka write a byte).
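// An end-to-end sketch of that handshake (assumed usage; error handling
// and the actual fix work are elided):
fn lockserver_demo() -> Result<(), failure::Error> {
    let server = LockServer::new()?;
    let addr = *server.addr();
    let _running = server.start()?; // accepts clients on a background thread
    // In a worker process: blocks until the server writes the "go" byte.
    let _guard = LockServerClient::lock(&addr, "path/to/crate-entry-point")?;
    Ok(())
}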
+ +use std::collections::HashMap; +use std::io::{BufRead, BufReader, Read, Write}; +use std::net::{SocketAddr, TcpListener, TcpStream}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::thread::{self, JoinHandle}; + +use failure::{Error, ResultExt}; + +pub struct LockServer { + listener: TcpListener, + addr: SocketAddr, + threads: HashMap, + done: Arc, +} + +pub struct LockServerStarted { + done: Arc, + addr: SocketAddr, + thread: Option>, +} + +pub struct LockServerClient { + _socket: TcpStream, +} + +struct ServerClient { + thread: Option>, + lock: Arc)>>, +} + +impl LockServer { + pub fn new() -> Result { + let listener = TcpListener::bind("127.0.0.1:0") + .with_context(|_| "failed to bind TCP listener to manage locking")?; + let addr = listener.local_addr()?; + Ok(LockServer { + listener, + addr, + threads: HashMap::new(), + done: Arc::new(AtomicBool::new(false)), + }) + } + + pub fn addr(&self) -> &SocketAddr { + &self.addr + } + + pub fn start(self) -> Result { + let addr = self.addr; + let done = self.done.clone(); + let thread = thread::spawn(|| { + self.run(); + }); + Ok(LockServerStarted { + addr, + thread: Some(thread), + done, + }) + } + + fn run(mut self) { + while let Ok((client, _)) = self.listener.accept() { + if self.done.load(Ordering::SeqCst) { + break; + } + + // Learn the name of our connected client to figure out if it needs + // to wait for another process to release the lock. + let mut client = BufReader::new(client); + let mut name = String::new(); + if client.read_line(&mut name).is_err() { + continue; + } + let client = client.into_inner(); + + // If this "named mutex" is already registered and the thread is + // still going, put it on the queue. Otherwise wait on the previous + // thread and we'll replace it just below. + if let Some(t) = self.threads.get_mut(&name) { + let mut state = t.lock.lock().unwrap(); + if state.0 { + state.1.push(client); + continue; + } + drop(t.thread.take().unwrap().join()); + } + + let lock = Arc::new(Mutex::new((true, vec![client]))); + let lock2 = lock.clone(); + let thread = thread::spawn(move || { + loop { + let mut client = { + let mut state = lock2.lock().unwrap(); + if state.1.is_empty() { + state.0 = false; + break; + } else { + state.1.remove(0) + } + }; + // Inform this client that it now has the lock and wait for + // it to disconnect by waiting for EOF. 
+ if client.write_all(&[1]).is_err() { + continue; + } + let mut dst = Vec::new(); + drop(client.read_to_end(&mut dst)); + } + }); + + self.threads.insert( + name, + ServerClient { + thread: Some(thread), + lock, + }, + ); + } + } +} + +impl Drop for LockServer { + fn drop(&mut self) { + for (_, mut client) in self.threads.drain() { + if let Some(thread) = client.thread.take() { + drop(thread.join()); + } + } + } +} + +impl Drop for LockServerStarted { + fn drop(&mut self) { + self.done.store(true, Ordering::SeqCst); + // Ignore errors here as this is largely best-effort + if TcpStream::connect(&self.addr).is_err() { + return; + } + drop(self.thread.take().unwrap().join()); + } +} + +impl LockServerClient { + pub fn lock(addr: &SocketAddr, name: impl AsRef<[u8]>) -> Result { + let mut client = TcpStream::connect(&addr) + .with_context(|_| "failed to connect to parent lock server")?; + client + .write_all(name.as_ref()) + .and_then(|_| client.write_all(b"\n")) + .with_context(|_| "failed to write to lock server")?; + let mut buf = [0]; + client + .read_exact(&mut buf) + .with_context(|_| "failed to acquire lock")?; + Ok(LockServerClient { _socket: client }) + } +} diff --git a/src/cargo/util/machine_message.rs b/src/cargo/util/machine_message.rs new file mode 100644 index 000000000..a1dad7dd6 --- /dev/null +++ b/src/cargo/util/machine_message.rs @@ -0,0 +1,75 @@ +use std::path::PathBuf; + +use serde::ser; +use serde::Serialize; +use serde_json::{self, json, value::RawValue}; + +use crate::core::{PackageId, Target}; + +pub trait Message: ser::Serialize { + fn reason(&self) -> &str; +} + +pub fn emit(t: &T) { + let json = serde_json::to_string(t).unwrap(); + assert!(json.starts_with("{\"")); + let reason = json!(t.reason()); + println!("{{\"reason\":{},{}", reason, &json[1..]); +} + +#[derive(Serialize)] +pub struct FromCompiler<'a> { + pub package_id: PackageId, + pub target: &'a Target, + pub message: Box, +} + +impl<'a> Message for FromCompiler<'a> { + fn reason(&self) -> &str { + "compiler-message" + } +} + +#[derive(Serialize)] +pub struct Artifact<'a> { + pub package_id: PackageId, + pub target: &'a Target, + pub profile: ArtifactProfile, + pub features: Vec, + pub filenames: Vec, + pub executable: Option, + pub fresh: bool, +} + +impl<'a> Message for Artifact<'a> { + fn reason(&self) -> &str { + "compiler-artifact" + } +} + +/// This is different from the regular `Profile` to maintain backwards +/// compatibility (in particular, `test` is no longer in `Profile`, but we +/// still want it to be included here). 
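// Sketch of how a new message kind plugs into the `Message` trait above;
// the type and reason string here are hypothetical.
//
//     #[derive(Serialize)]
//     struct ExampleMessage {
//         detail: String,
//     }
//
//     impl Message for ExampleMessage {
//         fn reason(&self) -> &str {
//             "example-message"
//         }
//     }
//
//     // emit(&ExampleMessage { detail: "hi".into() }) prints:
//     // {"reason":"example-message","detail":"hi"}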
+#[derive(Serialize)] +pub struct ArtifactProfile { + pub opt_level: &'static str, + pub debuginfo: Option, + pub debug_assertions: bool, + pub overflow_checks: bool, + pub test: bool, +} + +#[derive(Serialize)] +pub struct BuildScript<'a> { + pub package_id: PackageId, + pub linked_libs: &'a [String], + pub linked_paths: &'a [String], + pub cfgs: &'a [String], + pub env: &'a [(String, String)], +} + +impl<'a> Message for BuildScript<'a> { + fn reason(&self) -> &str { + "build-script-executed" + } +} diff --git a/src/cargo/util/mod.rs b/src/cargo/util/mod.rs new file mode 100644 index 000000000..d973016ba --- /dev/null +++ b/src/cargo/util/mod.rs @@ -0,0 +1,82 @@ +use std::time::Duration; + +pub use self::cfg::{Cfg, CfgExpr}; +pub use self::config::{homedir, Config, ConfigValue}; +pub use self::dependency_queue::{DependencyQueue, Dirty, Fresh, Freshness}; +pub use self::diagnostic_server::RustfixDiagnosticServer; +pub use self::errors::{internal, process_error}; +pub use self::errors::{CargoResult, CargoResultExt, CliResult, Test}; +pub use self::errors::{CargoTestError, CliError, ProcessError}; +pub use self::flock::{FileLock, Filesystem}; +pub use self::graph::Graph; +pub use self::hex::{hash_u64, short_hash, to_hex}; +pub use self::lev_distance::lev_distance; +pub use self::lockserver::{LockServer, LockServerClient, LockServerStarted}; +pub use self::paths::{bytes2path, dylib_path, join_paths, path2bytes}; +pub use self::paths::{dylib_path_envvar, normalize_path}; +pub use self::process_builder::{process, ProcessBuilder}; +pub use self::progress::{Progress, ProgressStyle}; +pub use self::read2::read2; +pub use self::rustc::Rustc; +pub use self::sha256::Sha256; +pub use self::to_semver::ToSemver; +pub use self::to_url::ToUrl; +pub use self::vcs::{existing_vcs_repo, FossilRepo, GitRepo, HgRepo, PijulRepo}; +pub use self::workspace::{ + print_available_benches, print_available_binaries, print_available_examples, + print_available_tests, +}; + +mod cfg; +pub mod command_prelude; +pub mod config; +mod dependency_queue; +pub mod diagnostic_server; +pub mod errors; +mod flock; +pub mod graph; +pub mod hex; +pub mod important_paths; +pub mod job; +pub mod lev_distance; +mod lockserver; +pub mod machine_message; +pub mod network; +pub mod paths; +pub mod process_builder; +pub mod profile; +mod progress; +mod read2; +mod rustc; +mod sha256; +pub mod to_semver; +pub mod to_url; +pub mod toml; +mod vcs; +mod workspace; + +pub fn elapsed(duration: Duration) -> String { + let secs = duration.as_secs(); + + if secs >= 60 { + format!("{}m {:02}s", secs / 60, secs % 60) + } else { + format!("{}.{:02}s", secs, duration.subsec_nanos() / 10_000_000) + } +} + +/// Check the base requirements for a package name. +/// +/// This can be used for other things than package names, to enforce some +/// level of sanity. Note that package names have other restrictions +/// elsewhere. `cargo new` has a few restrictions, such as checking for +/// reserved names. crates.io has even more restrictions. 
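// Behavior sketch for the validator defined just below: alphanumerics,
// `_`, and `-` are accepted, anything else is rejected.
//
//     assert!(validate_package_name("serde_json-2", "package name", "").is_ok());
//     assert!(validate_package_name("bad name!", "package name", "").is_err());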
+pub fn validate_package_name(name: &str, what: &str, help: &str) -> CargoResult<()> { + if let Some(ch) = name + .chars() + .find(|ch| !ch.is_alphanumeric() && *ch != '_' && *ch != '-') + { + failure::bail!("Invalid character `{}` in {}: `{}`{}", ch, what, name, help); + } + Ok(()) +} diff --git a/src/cargo/util/network.rs b/src/cargo/util/network.rs new file mode 100644 index 000000000..310f24a65 --- /dev/null +++ b/src/cargo/util/network.rs @@ -0,0 +1,127 @@ +use curl; +use git2; + +use failure::Error; + +use crate::util::errors::{CargoResult, HttpNot200}; +use crate::util::Config; + +pub struct Retry<'a> { + config: &'a Config, + remaining: u32, +} + +impl<'a> Retry<'a> { + pub fn new(config: &'a Config) -> CargoResult> { + Ok(Retry { + config, + remaining: config.get::>("net.retry")?.unwrap_or(2), + }) + } + + pub fn r#try(&mut self, f: impl FnOnce() -> CargoResult) -> CargoResult> { + match f() { + Err(ref e) if maybe_spurious(e) && self.remaining > 0 => { + let msg = format!( + "spurious network error ({} tries \ + remaining): {}", + self.remaining, e + ); + self.config.shell().warn(msg)?; + self.remaining -= 1; + Ok(None) + } + other => other.map(Some), + } + } +} + +fn maybe_spurious(err: &Error) -> bool { + for e in err.iter_chain() { + if let Some(git_err) = e.downcast_ref::() { + match git_err.class() { + git2::ErrorClass::Net | git2::ErrorClass::Os => return true, + _ => (), + } + } + if let Some(curl_err) = e.downcast_ref::() { + if curl_err.is_couldnt_connect() + || curl_err.is_couldnt_resolve_proxy() + || curl_err.is_couldnt_resolve_host() + || curl_err.is_operation_timedout() + || curl_err.is_recv_error() + { + return true; + } + } + if let Some(not_200) = e.downcast_ref::() { + if 500 <= not_200.code && not_200.code < 600 { + return true; + } + } + } + false +} + +/// Wrapper method for network call retry logic. +/// +/// Retry counts provided by Config object `net.retry`. Config shell outputs +/// a warning on per retry. +/// +/// Closure must return a `CargoResult`. +/// +/// # Examples +/// +/// ```ignore +/// use util::network; +/// cargo_result = network::with_retry(&config, || something.download()); +/// ``` +pub fn with_retry(config: &Config, mut callback: F) -> CargoResult +where + F: FnMut() -> CargoResult, +{ + let mut retry = Retry::new(config)?; + loop { + if let Some(ret) = retry.r#try(&mut callback)? 
{ + return Ok(ret); + } + } +} +#[test] +fn with_retry_repeats_the_call_then_works() { + //Error HTTP codes (5xx) are considered maybe_spurious and will prompt retry + let error1 = HttpNot200 { + code: 501, + url: "Uri".to_string(), + } + .into(); + let error2 = HttpNot200 { + code: 502, + url: "Uri".to_string(), + } + .into(); + let mut results: Vec> = vec![Ok(()), Err(error1), Err(error2)]; + let config = Config::default().unwrap(); + let result = with_retry(&config, || results.pop().unwrap()); + assert_eq!(result.unwrap(), ()) +} + +#[test] +fn with_retry_finds_nested_spurious_errors() { + //Error HTTP codes (5xx) are considered maybe_spurious and will prompt retry + //String error messages are not considered spurious + let error1 = failure::Error::from(HttpNot200 { + code: 501, + url: "Uri".to_string(), + }); + let error1 = failure::Error::from(error1.context("A non-spurious wrapping err")); + let error2 = failure::Error::from(HttpNot200 { + code: 502, + url: "Uri".to_string(), + }); + let error2 = failure::Error::from(error2.context("A second chained error")); + let mut results: Vec> = vec![Ok(()), Err(error1), Err(error2)]; + let config = Config::default().unwrap(); + let result = with_retry(&config, || results.pop().unwrap()); + assert_eq!(result.unwrap(), ()) +} diff --git a/src/cargo/util/paths.rs b/src/cargo/util/paths.rs new file mode 100644 index 000000000..00ee3f5ce --- /dev/null +++ b/src/cargo/util/paths.rs @@ -0,0 +1,332 @@ +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fs::{self, File, OpenOptions}; +use std::io; +use std::io::prelude::*; +use std::iter; +use std::path::{Component, Path, PathBuf}; + +use filetime::FileTime; + +use crate::util::errors::{CargoResult, CargoResultExt, Internal}; + +pub fn join_paths>(paths: &[T], env: &str) -> CargoResult { + let err = match env::join_paths(paths.iter()) { + Ok(paths) => return Ok(paths), + Err(e) => e, + }; + let paths = paths.iter().map(Path::new).collect::>(); + let err = failure::Error::from(err); + let explain = Internal::new(failure::format_err!( + "failed to join path array: {:?}", + paths + )); + let err = failure::Error::from(err.context(explain)); + let more_explain = format!( + "failed to join search paths together\n\ + Does ${} have an unterminated quote character?", + env + ); + Err(err.context(more_explain).into()) +} + +pub fn dylib_path_envvar() -> &'static str { + if cfg!(windows) { + "PATH" + } else if cfg!(target_os = "macos") { + // When loading and linking a dynamic library or bundle, dlopen + // searches in LD_LIBRARY_PATH, DYLD_LIBRARY_PATH, PWD, and + // DYLD_FALLBACK_LIBRARY_PATH. + // In the Mach-O format, a dynamic library has an "install path." + // Clients linking against the library record this path, and the + // dynamic linker, dyld, uses it to locate the library. + // dyld searches DYLD_LIBRARY_PATH *before* the install path. + // dyld searches DYLD_FALLBACK_LIBRARY_PATH only if it cannot + // find the library in the install path. + // Setting DYLD_LIBRARY_PATH can easily have unintended + // consequences. + // + // Also, DYLD_LIBRARY_PATH appears to have significant performance + // penalty starting in 10.13. Cargo's testsuite ran more than twice as + // slow with it on CI. 
+ "DYLD_FALLBACK_LIBRARY_PATH" + } else { + "LD_LIBRARY_PATH" + } +} + +pub fn dylib_path() -> Vec { + match env::var_os(dylib_path_envvar()) { + Some(var) => env::split_paths(&var).collect(), + None => Vec::new(), + } +} + +pub fn normalize_path(path: &Path) -> PathBuf { + let mut components = path.components().peekable(); + let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek().cloned() { + components.next(); + PathBuf::from(c.as_os_str()) + } else { + PathBuf::new() + }; + + for component in components { + match component { + Component::Prefix(..) => unreachable!(), + Component::RootDir => { + ret.push(component.as_os_str()); + } + Component::CurDir => {} + Component::ParentDir => { + ret.pop(); + } + Component::Normal(c) => { + ret.push(c); + } + } + } + ret +} + +pub fn resolve_executable(exec: &Path) -> CargoResult { + if exec.components().count() == 1 { + let paths = env::var_os("PATH").ok_or_else(|| failure::format_err!("no PATH"))?; + let candidates = env::split_paths(&paths).flat_map(|path| { + let candidate = path.join(&exec); + let with_exe = if env::consts::EXE_EXTENSION == "" { + None + } else { + Some(candidate.with_extension(env::consts::EXE_EXTENSION)) + }; + iter::once(candidate).chain(with_exe) + }); + for candidate in candidates { + if candidate.is_file() { + // PATH may have a component like "." in it, so we still need to + // canonicalize. + return Ok(candidate.canonicalize()?); + } + } + + failure::bail!("no executable for `{}` found in PATH", exec.display()) + } else { + Ok(exec.canonicalize()?) + } +} + +pub fn read(path: &Path) -> CargoResult { + match String::from_utf8(read_bytes(path)?) { + Ok(s) => Ok(s), + Err(_) => failure::bail!("path at `{}` was not valid utf-8", path.display()), + } +} + +pub fn read_bytes(path: &Path) -> CargoResult> { + let res = (|| -> CargoResult<_> { + let mut ret = Vec::new(); + let mut f = File::open(path)?; + if let Ok(m) = f.metadata() { + ret.reserve(m.len() as usize + 1); + } + f.read_to_end(&mut ret)?; + Ok(ret) + })() + .chain_err(|| format!("failed to read `{}`", path.display()))?; + Ok(res) +} + +pub fn write(path: &Path, contents: &[u8]) -> CargoResult<()> { + (|| -> CargoResult<()> { + let mut f = File::create(path)?; + f.write_all(contents)?; + Ok(()) + })() + .chain_err(|| format!("failed to write `{}`", path.display()))?; + Ok(()) +} + +pub fn write_if_changed, C: AsRef<[u8]>>(path: P, contents: C) -> CargoResult<()> { + (|| -> CargoResult<()> { + let contents = contents.as_ref(); + let mut f = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(&path)?; + let mut orig = Vec::new(); + f.read_to_end(&mut orig)?; + if orig != contents { + f.set_len(0)?; + f.seek(io::SeekFrom::Start(0))?; + f.write_all(contents)?; + } + Ok(()) + })() + .chain_err(|| format!("failed to write `{}`", path.as_ref().display()))?; + Ok(()) +} + +pub fn append(path: &Path, contents: &[u8]) -> CargoResult<()> { + (|| -> CargoResult<()> { + let mut f = OpenOptions::new() + .write(true) + .append(true) + .create(true) + .open(path)?; + + f.write_all(contents)?; + Ok(()) + })() + .chain_err(|| format!("failed to write `{}`", path.display()))?; + Ok(()) +} + +pub fn mtime(path: &Path) -> CargoResult { + let meta = fs::metadata(path).chain_err(|| format!("failed to stat `{}`", path.display()))?; + Ok(FileTime::from_last_modification_time(&meta)) +} + +/// get `FileTime::from_system_time(SystemTime::now());` using the exact clock that this file system is using. 
+pub fn get_current_filesystem_time(path: &Path) -> CargoResult { + // note that if `FileTime::from_system_time(SystemTime::now());` is determined to be sufficient, + // then this can be removed. + let timestamp = path.with_file_name("invoked.timestamp"); + write( + ×tamp, + b"This file has an mtime of when this was started.", + )?; + Ok(mtime(×tamp)?) +} + +#[cfg(unix)] +pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> { + use std::os::unix::prelude::*; + Ok(path.as_os_str().as_bytes()) +} +#[cfg(windows)] +pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> { + match path.as_os_str().to_str() { + Some(s) => Ok(s.as_bytes()), + None => Err(failure::format_err!( + "invalid non-unicode path: {}", + path.display() + )), + } +} + +#[cfg(unix)] +pub fn bytes2path(bytes: &[u8]) -> CargoResult { + use std::os::unix::prelude::*; + Ok(PathBuf::from(OsStr::from_bytes(bytes))) +} +#[cfg(windows)] +pub fn bytes2path(bytes: &[u8]) -> CargoResult { + use std::str; + match str::from_utf8(bytes) { + Ok(s) => Ok(PathBuf::from(s)), + Err(..) => Err(failure::format_err!("invalid non-unicode path")), + } +} + +pub fn ancestors(path: &Path) -> PathAncestors<'_> { + PathAncestors::new(path) +} + +pub struct PathAncestors<'a> { + current: Option<&'a Path>, + stop_at: Option, +} + +impl<'a> PathAncestors<'a> { + fn new(path: &Path) -> PathAncestors<'_> { + PathAncestors { + current: Some(path), + //HACK: avoid reading `~/.cargo/config` when testing Cargo itself. + stop_at: env::var("__CARGO_TEST_ROOT").ok().map(PathBuf::from), + } + } +} + +impl<'a> Iterator for PathAncestors<'a> { + type Item = &'a Path; + + fn next(&mut self) -> Option<&'a Path> { + if let Some(path) = self.current { + self.current = path.parent(); + + if let Some(ref stop_at) = self.stop_at { + if path == stop_at { + self.current = None; + } + } + + Some(path) + } else { + None + } + } +} + +pub fn remove_dir_all>(p: P) -> CargoResult<()> { + _remove_dir_all(p.as_ref()) +} + +fn _remove_dir_all(p: &Path) -> CargoResult<()> { + if p.symlink_metadata()?.file_type().is_symlink() { + return remove_file(p); + } + let entries = p + .read_dir() + .chain_err(|| format!("failed to read directory `{}`", p.display()))?; + for entry in entries { + let entry = entry?; + let path = entry.path(); + if entry.file_type()?.is_dir() { + remove_dir_all(&path)?; + } else { + remove_file(&path)?; + } + } + remove_dir(&p) +} + +pub fn remove_dir>(p: P) -> CargoResult<()> { + _remove_dir(p.as_ref()) +} + +fn _remove_dir(p: &Path) -> CargoResult<()> { + fs::remove_dir(p).chain_err(|| format!("failed to remove directory `{}`", p.display()))?; + Ok(()) +} + +pub fn remove_file>(p: P) -> CargoResult<()> { + _remove_file(p.as_ref()) +} + +fn _remove_file(p: &Path) -> CargoResult<()> { + let mut err = match fs::remove_file(p) { + Ok(()) => return Ok(()), + Err(e) => e, + }; + + if err.kind() == io::ErrorKind::PermissionDenied && set_not_readonly(p).unwrap_or(false) { + match fs::remove_file(p) { + Ok(()) => return Ok(()), + Err(e) => err = e, + } + } + + Err(err).chain_err(|| format!("failed to remove file `{}`", p.display()))?; + Ok(()) +} + +fn set_not_readonly(p: &Path) -> io::Result { + let mut perms = p.metadata()?.permissions(); + if !perms.readonly() { + return Ok(false); + } + perms.set_readonly(false); + fs::set_permissions(p, perms)?; + Ok(true) +} diff --git a/src/cargo/util/process_builder.rs b/src/cargo/util/process_builder.rs new file mode 100644 index 000000000..de96420cc --- /dev/null +++ b/src/cargo/util/process_builder.rs @@ -0,0 +1,387 @@ +use 
std::collections::HashMap; +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fmt; +use std::path::Path; +use std::process::{Command, Output, Stdio}; + +use failure::Fail; +use jobserver::Client; +use shell_escape::escape; + +use crate::util::{process_error, read2, CargoResult, CargoResultExt}; + +/// A builder object for an external process, similar to `std::process::Command`. +#[derive(Clone, Debug)] +pub struct ProcessBuilder { + /// The program to execute. + program: OsString, + /// A list of arguments to pass to the program. + args: Vec, + /// Any environment variables that should be set for the program. + env: HashMap>, + /// The directory to run the program from. + cwd: Option, + /// The `make` jobserver. See the [jobserver crate][jobserver_docs] for + /// more information. + /// + /// [jobserver_docs]: https://docs.rs/jobserver/0.1.6/jobserver/ + jobserver: Option, + /// `true` to include environment variable in display. + display_env_vars: bool, +} + +impl fmt::Display for ProcessBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "`")?; + + if self.display_env_vars { + for (key, val) in self.env.iter() { + if let Some(val) = val { + let val = escape(val.to_string_lossy()); + if cfg!(windows) { + write!(f, "set {}={}&& ", key, val)?; + } else { + write!(f, "{}={} ", key, val)?; + } + } + } + } + + write!(f, "{}", self.program.to_string_lossy())?; + + for arg in &self.args { + write!(f, " {}", escape(arg.to_string_lossy()))?; + } + + write!(f, "`") + } +} + +impl ProcessBuilder { + /// (chainable) Sets the executable for the process. + pub fn program>(&mut self, program: T) -> &mut ProcessBuilder { + self.program = program.as_ref().to_os_string(); + self + } + + /// (chainable) Adds `arg` to the args list. + pub fn arg>(&mut self, arg: T) -> &mut ProcessBuilder { + self.args.push(arg.as_ref().to_os_string()); + self + } + + /// (chainable) Adds multiple `args` to the args list. + pub fn args>(&mut self, args: &[T]) -> &mut ProcessBuilder { + self.args + .extend(args.iter().map(|t| t.as_ref().to_os_string())); + self + } + + /// (chainable) Replaces the args list with the given `args`. + pub fn args_replace>(&mut self, args: &[T]) -> &mut ProcessBuilder { + self.args = args + .iter() + .map(|t| t.as_ref().to_os_string()) + .collect(); + self + } + + /// (chainable) Sets the current working directory of the process. + pub fn cwd>(&mut self, path: T) -> &mut ProcessBuilder { + self.cwd = Some(path.as_ref().to_os_string()); + self + } + + /// (chainable) Sets an environment variable for the process. + pub fn env>(&mut self, key: &str, val: T) -> &mut ProcessBuilder { + self.env + .insert(key.to_string(), Some(val.as_ref().to_os_string())); + self + } + + /// (chainable) Unsets an environment variable for the process. + pub fn env_remove(&mut self, key: &str) -> &mut ProcessBuilder { + self.env.insert(key.to_string(), None); + self + } + + /// Gets the executable name. + pub fn get_program(&self) -> &OsString { + &self.program + } + + /// Gets the program arguments. + pub fn get_args(&self) -> &[OsString] { + &self.args + } + + /// Gets the current working directory for the process. + pub fn get_cwd(&self) -> Option<&Path> { + self.cwd.as_ref().map(Path::new) + } + + /// Gets an environment variable as the process will see it (will inherit from environment + /// unless explicitally unset). 
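// A typical construction sequence for this builder (sketch; `process` is
// the helper defined at the bottom of this file, and the argument and
// environment variable are hypothetical):
//
//     let mut cmd = process("rustc");
//     cmd.arg("--version").env("EXAMPLE_VAR", "1");
//     cmd.exec()?; // a non-zero exit status becomes a ProcessError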
+ pub fn get_env(&self, var: &str) -> Option { + self.env + .get(var) + .cloned() + .or_else(|| Some(env::var_os(var))) + .and_then(|s| s) + } + + /// Gets all environment variables explicitly set or unset for the process (not inherited + /// vars). + pub fn get_envs(&self) -> &HashMap> { + &self.env + } + + /// Sets the `make` jobserver. See the [jobserver crate][jobserver_docs] for + /// more information. + /// + /// [jobserver_docs]: https://docs.rs/jobserver/0.1.6/jobserver/ + pub fn inherit_jobserver(&mut self, jobserver: &Client) -> &mut Self { + self.jobserver = Some(jobserver.clone()); + self + } + + /// Enables environment variable display. + pub fn display_env_vars(&mut self) -> &mut Self { + self.display_env_vars = true; + self + } + + /// Runs the process, waiting for completion, and mapping non-success exit codes to an error. + pub fn exec(&self) -> CargoResult<()> { + let mut command = self.build_command(); + let exit = command.status().chain_err(|| { + process_error(&format!("could not execute process {}", self), None, None) + })?; + + if exit.success() { + Ok(()) + } else { + Err(process_error( + &format!("process didn't exit successfully: {}", self), + Some(exit), + None, + ) + .into()) + } + } + + /// Replaces the current process with the target process. + /// + /// On Unix, this executes the process using the Unix syscall `execvp`, which will block + /// this process, and will only return if there is an error. + /// + /// On Windows this isn't technically possible. Instead we emulate it to the best of our + /// ability. One aspect we fix here is that we specify a handler for the Ctrl-C handler. + /// In doing so (and by effectively ignoring it) we should emulate proxying Ctrl-C + /// handling to the application at hand, which will either terminate or handle it itself. + /// According to Microsoft's documentation at + /// . + /// the Ctrl-C signal is sent to all processes attached to a terminal, which should + /// include our child process. If the child terminates then we'll reap them in Cargo + /// pretty quickly, and if the child handles the signal then we won't terminate + /// (and we shouldn't!) until the process itself later exits. + pub fn exec_replace(&self) -> CargoResult<()> { + imp::exec_replace(self) + } + + /// Executes the process, returning the stdio output, or an error if non-zero exit status. + pub fn exec_with_output(&self) -> CargoResult { + let mut command = self.build_command(); + + let output = command.output().chain_err(|| { + process_error(&format!("could not execute process {}", self), None, None) + })?; + + if output.status.success() { + Ok(output) + } else { + Err(process_error( + &format!("process didn't exit successfully: {}", self), + Some(output.status), + Some(&output), + ) + .into()) + } + } + + /// Executes a command, passing each line of stdout and stderr to the supplied callbacks, which + /// can mutate the string data. + /// + /// If any invocations of these function return an error, it will be propagated. 
+ /// + /// Optionally, output can be passed to errors using `print_output` + pub fn exec_with_streaming( + &self, + on_stdout_line: &mut dyn FnMut(&str) -> CargoResult<()>, + on_stderr_line: &mut dyn FnMut(&str) -> CargoResult<()>, + capture_output: bool, + ) -> CargoResult { + let mut stdout = Vec::new(); + let mut stderr = Vec::new(); + + let mut cmd = self.build_command(); + cmd.stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()); + + let mut callback_error = None; + let status = (|| { + let mut child = cmd.spawn()?; + let out = child.stdout.take().unwrap(); + let err = child.stderr.take().unwrap(); + read2(out, err, &mut |is_out, data, eof| { + let idx = if eof { + data.len() + } else { + match data.iter().rposition(|b| *b == b'\n') { + Some(i) => i + 1, + None => return, + } + }; + { + // scope for new_lines + let new_lines = if capture_output { + let dst = if is_out { &mut stdout } else { &mut stderr }; + let start = dst.len(); + let data = data.drain(..idx); + dst.extend(data); + &dst[start..] + } else { + &data[..idx] + }; + for line in String::from_utf8_lossy(new_lines).lines() { + if callback_error.is_some() { + break; + } + let callback_result = if is_out { + on_stdout_line(line) + } else { + on_stderr_line(line) + }; + if let Err(e) = callback_result { + callback_error = Some(e); + } + } + } + if !capture_output { + data.drain(..idx); + } + })?; + child.wait() + })() + .chain_err(|| process_error(&format!("could not execute process {}", self), None, None))?; + let output = Output { + stdout, + stderr, + status, + }; + + { + let to_print = if capture_output { Some(&output) } else { None }; + if let Some(e) = callback_error { + let cx = process_error( + &format!("failed to parse process output: {}", self), + Some(output.status), + to_print, + ); + return Err(cx.context(e).into()); + } else if !output.status.success() { + return Err(process_error( + &format!("process didn't exit successfully: {}", self), + Some(output.status), + to_print, + ) + .into()); + } + } + + Ok(output) + } + + /// Converts `ProcessBuilder` into a `std::process::Command`, and handles the jobserver, if + /// present. + pub fn build_command(&self) -> Command { + let mut command = Command::new(&self.program); + if let Some(cwd) = self.get_cwd() { + command.current_dir(cwd); + } + for arg in &self.args { + command.arg(arg); + } + for (k, v) in &self.env { + match *v { + Some(ref v) => { + command.env(k, v); + } + None => { + command.env_remove(k); + } + } + } + if let Some(ref c) = self.jobserver { + c.configure(&mut command); + } + command + } +} + +/// A helper function to create a `ProcessBuilder`. 
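// Sketch of driving `exec_with_streaming` above: forward child output line
// by line while also capturing it into the returned `Output`.
//
//     fn run_streamed(cmd: &ProcessBuilder) -> CargoResult<Output> {
//         cmd.exec_with_streaming(
//             &mut |line| { println!("out: {}", line); Ok(()) },
//             &mut |line| { eprintln!("err: {}", line); Ok(()) },
//             true, // capture_output
//         )
//     }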
+pub fn process>(cmd: T) -> ProcessBuilder { + ProcessBuilder { + program: cmd.as_ref().to_os_string(), + args: Vec::new(), + cwd: None, + env: HashMap::new(), + jobserver: None, + display_env_vars: false, + } +} + +#[cfg(unix)] +mod imp { + use crate::util::{process_error, ProcessBuilder}; + use crate::CargoResult; + use std::os::unix::process::CommandExt; + + pub fn exec_replace(process_builder: &ProcessBuilder) -> CargoResult<()> { + let mut command = process_builder.build_command(); + let error = command.exec(); + Err(failure::Error::from(error) + .context(process_error( + &format!("could not execute process {}", process_builder), + None, + None, + )) + .into()) + } +} + +#[cfg(windows)] +mod imp { + use crate::util::{process_error, ProcessBuilder}; + use crate::CargoResult; + use winapi::shared::minwindef::{BOOL, DWORD, FALSE, TRUE}; + use winapi::um::consoleapi::SetConsoleCtrlHandler; + + unsafe extern "system" fn ctrlc_handler(_: DWORD) -> BOOL { + // Do nothing; let the child process handle it. + TRUE + } + + pub fn exec_replace(process_builder: &ProcessBuilder) -> CargoResult<()> { + unsafe { + if SetConsoleCtrlHandler(Some(ctrlc_handler), TRUE) == FALSE { + return Err(process_error("Could not set Ctrl-C handler.", None, None).into()); + } + } + + // Just execute the process as normal. + process_builder.exec() + } +} diff --git a/src/cargo/util/profile.rs b/src/cargo/util/profile.rs new file mode 100644 index 000000000..b450d1205 --- /dev/null +++ b/src/cargo/util/profile.rs @@ -0,0 +1,89 @@ +use std::cell::RefCell; +use std::env; +use std::fmt; +use std::io::{stdout, StdoutLock, Write}; +use std::iter::repeat; +use std::mem; +use std::time; + +thread_local!(static PROFILE_STACK: RefCell> = RefCell::new(Vec::new())); +thread_local!(static MESSAGES: RefCell> = RefCell::new(Vec::new())); + +type Message = (usize, u64, String); + +pub struct Profiler { + desc: String, +} + +fn enabled_level() -> Option { + env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok()) +} + +pub fn start(desc: T) -> Profiler { + if enabled_level().is_none() { + return Profiler { + desc: String::new(), + }; + } + + PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now())); + + Profiler { + desc: desc.to_string(), + } +} + +impl Drop for Profiler { + fn drop(&mut self) { + let enabled = match enabled_level() { + Some(i) => i, + None => return, + }; + + let (start, stack_len) = PROFILE_STACK.with(|stack| { + let mut stack = stack.borrow_mut(); + let start = stack.pop().unwrap(); + (start, stack.len()) + }); + let duration = start.elapsed(); + let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_millis()); + + let msg = ( + stack_len, + duration_ms, + mem::replace(&mut self.desc, String::new()), + ); + MESSAGES.with(|msgs| msgs.borrow_mut().push(msg)); + + if stack_len == 0 { + fn print(lvl: usize, msgs: &[Message], enabled: usize, stdout: &mut StdoutLock<'_>) { + if lvl > enabled { + return; + } + let mut last = 0; + for (i, &(l, time, ref msg)) in msgs.iter().enumerate() { + if l != lvl { + continue; + } + writeln!( + stdout, + "{} {:6}ms - {}", + repeat(" ").take(lvl + 1).collect::(), + time, + msg + ) + .expect("printing profiling info to stdout"); + + print(lvl + 1, &msgs[last..i], enabled, stdout); + last = i; + } + } + let stdout = stdout(); + MESSAGES.with(|msgs| { + let mut msgs = msgs.borrow_mut(); + print(0, &msgs, enabled, &mut stdout.lock()); + msgs.clear(); + }); + } + } +} diff --git a/src/cargo/util/progress.rs b/src/cargo/util/progress.rs new file mode 
100644 index 000000000..a72796278 --- /dev/null +++ b/src/cargo/util/progress.rs @@ -0,0 +1,419 @@ +use std::cmp; +use std::env; +use std::time::{Duration, Instant}; + +use crate::core::shell::Verbosity; +use crate::util::{CargoResult, Config}; + +use unicode_width::UnicodeWidthChar; + +pub struct Progress<'cfg> { + state: Option>, +} + +pub enum ProgressStyle { + Percentage, + Ratio, +} + +struct Throttle { + first: bool, + last_update: Instant, +} + +struct State<'cfg> { + config: &'cfg Config, + format: Format, + name: String, + done: bool, + throttle: Throttle, + last_line: Option, +} + +struct Format { + style: ProgressStyle, + max_width: usize, + max_print: usize, +} + +impl<'cfg> Progress<'cfg> { + pub fn with_style(name: &str, style: ProgressStyle, cfg: &'cfg Config) -> Progress<'cfg> { + // report no progress when -q (for quiet) or TERM=dumb are set + // or if running on Continuous Integration service like Travis where the + // output logs get mangled. + let dumb = match env::var("TERM") { + Ok(term) => term == "dumb", + Err(_) => false, + }; + if cfg.shell().verbosity() == Verbosity::Quiet || dumb || env::var("CI").is_ok() { + return Progress { state: None }; + } + + Progress { + state: cfg.shell().err_width().map(|n| State { + config: cfg, + format: Format { + style, + max_width: n, + max_print: 80, + }, + name: name.to_string(), + done: false, + throttle: Throttle::new(), + last_line: None, + }), + } + } + + pub fn disable(&mut self) { + self.state = None; + } + + pub fn is_enabled(&self) -> bool { + self.state.is_some() + } + + pub fn new(name: &str, cfg: &'cfg Config) -> Progress<'cfg> { + Self::with_style(name, ProgressStyle::Percentage, cfg) + } + + pub fn tick(&mut self, cur: usize, max: usize) -> CargoResult<()> { + let s = match &mut self.state { + Some(s) => s, + None => return Ok(()), + }; + + // Don't update too often as it can cause excessive performance loss + // just putting stuff onto the terminal. We also want to avoid + // flickering by not drawing anything that goes away too quickly. As a + // result we've got two branches here: + // + // 1. If we haven't drawn anything, we wait for a period of time to + // actually start drawing to the console. This ensures that + // short-lived operations don't flicker on the console. Currently + // there's a 500ms delay to when we first draw something. + // 2. If we've drawn something, then we rate limit ourselves to only + // draw to the console every so often. Currently there's a 100ms + // delay between updates. 
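// Caller-side sketch: ticks can arrive as fast as the work loop produces
// them, and the throttle below decides which ones actually reach the
// terminal (`config`, `items`, and `fetch` are hypothetical).
//
//     let mut progress = Progress::new("Downloading", config);
//     for (i, item) in items.iter().enumerate() {
//         fetch(item)?;
//         progress.tick(i + 1, items.len())?;
//     }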
+ if !s.throttle.allowed() { + return Ok(()); + } + + s.tick(cur, max, "") + } + + pub fn tick_now(&mut self, cur: usize, max: usize, msg: &str) -> CargoResult<()> { + match self.state { + Some(ref mut s) => s.tick(cur, max, msg), + None => Ok(()), + } + } + + pub fn update_allowed(&mut self) -> bool { + match &mut self.state { + Some(s) => s.throttle.allowed(), + None => false, + } + } + + pub fn print_now(&mut self, msg: &str) -> CargoResult<()> { + match &mut self.state { + Some(s) => s.print("", msg), + None => Ok(()), + } + } + + pub fn clear(&mut self) { + if let Some(ref mut s) = self.state { + s.clear(); + } + } +} + +impl Throttle { + fn new() -> Throttle { + Throttle { + first: true, + last_update: Instant::now(), + } + } + + fn allowed(&mut self) -> bool { + if self.first { + let delay = Duration::from_millis(500); + if self.last_update.elapsed() < delay { + return false; + } + } else { + let interval = Duration::from_millis(100); + if self.last_update.elapsed() < interval { + return false; + } + } + self.update(); + true + } + + fn update(&mut self) { + self.first = false; + self.last_update = Instant::now(); + } +} + +impl<'cfg> State<'cfg> { + fn tick(&mut self, cur: usize, max: usize, msg: &str) -> CargoResult<()> { + if self.done { + return Ok(()); + } + + if max > 0 && cur == max { + self.done = true; + } + + // Write out a pretty header, then the progress bar itself, and then + // return back to the beginning of the line for the next print. + self.try_update_max_width(); + if let Some(pbar) = self.format.progress(cur, max) { + self.print(&pbar, msg)?; + } + Ok(()) + } + + fn print(&mut self, prefix: &str, msg: &str) -> CargoResult<()> { + self.throttle.update(); + self.try_update_max_width(); + + // make sure we have enough room for the header + if self.format.max_width < 15 { + return Ok(()); + } + + let mut line = prefix.to_string(); + self.format.render(&mut line, msg); + while line.len() < self.format.max_width - 15 { + line.push(' '); + } + + // Only update if the line has changed. + if self.config.shell().is_cleared() || self.last_line.as_ref() != Some(&line) { + let mut shell = self.config.shell(); + shell.set_needs_clear(false); + shell.status_header(&self.name)?; + write!(shell.err(), "{}\r", line)?; + self.last_line = Some(line); + shell.set_needs_clear(true); + } + + Ok(()) + } + + fn clear(&mut self) { + // No need to clear if the progress is not currently being displayed. 
+ if self.last_line.is_some() && !self.config.shell().is_cleared() { + self.config.shell().err_erase_line(); + self.last_line = None; + } + } + + fn try_update_max_width(&mut self) { + if let Some(n) = self.config.shell().err_width() { + self.format.max_width = n; + } + } +} + +impl Format { + fn progress(&self, cur: usize, max: usize) -> Option { + // Render the percentage at the far right and then figure how long the + // progress bar is + let pct = (cur as f64) / (max as f64); + let pct = if !pct.is_finite() { 0.0 } else { pct }; + let stats = match self.style { + ProgressStyle::Percentage => format!(" {:6.02}%", pct * 100.0), + ProgressStyle::Ratio => format!(" {}/{}", cur, max), + }; + let extra_len = stats.len() + 2 /* [ and ] */ + 15 /* status header */; + let display_width = match self.width().checked_sub(extra_len) { + Some(n) => n, + None => return None, + }; + + let mut string = String::with_capacity(self.max_width); + string.push('['); + let hashes = display_width as f64 * pct; + let hashes = hashes as usize; + + // Draw the `===>` + if hashes > 0 { + for _ in 0..hashes - 1 { + string.push_str("="); + } + if cur == max { + string.push_str("="); + } else { + string.push_str(">"); + } + } + + // Draw the empty space we have left to do + for _ in 0..(display_width - hashes) { + string.push_str(" "); + } + string.push_str("]"); + string.push_str(&stats); + + Some(string) + } + + fn render(&self, string: &mut String, msg: &str) { + let mut avail_msg_len = self.max_width - string.len() - 15; + let mut ellipsis_pos = 0; + if avail_msg_len <= 3 { + return; + } + for c in msg.chars() { + let display_width = c.width().unwrap_or(0); + if avail_msg_len >= display_width { + avail_msg_len -= display_width; + string.push(c); + if avail_msg_len >= 3 { + ellipsis_pos = string.len(); + } + } else { + string.truncate(ellipsis_pos); + string.push_str("..."); + break; + } + } + } + + #[cfg(test)] + fn progress_status(&self, cur: usize, max: usize, msg: &str) -> Option { + let mut ret = self.progress(cur, max)?; + self.render(&mut ret, msg); + Some(ret) + } + + fn width(&self) -> usize { + cmp::min(self.max_width, self.max_print) + } +} + +impl<'cfg> Drop for State<'cfg> { + fn drop(&mut self) { + self.clear(); + } +} + +#[test] +fn test_progress_status() { + let format = Format { + style: ProgressStyle::Ratio, + max_print: 40, + max_width: 60, + }; + assert_eq!( + format.progress_status(0, 4, ""), + Some("[ ] 0/4".to_string()) + ); + assert_eq!( + format.progress_status(1, 4, ""), + Some("[===> ] 1/4".to_string()) + ); + assert_eq!( + format.progress_status(2, 4, ""), + Some("[========> ] 2/4".to_string()) + ); + assert_eq!( + format.progress_status(3, 4, ""), + Some("[=============> ] 3/4".to_string()) + ); + assert_eq!( + format.progress_status(4, 4, ""), + Some("[===================] 4/4".to_string()) + ); + + assert_eq!( + format.progress_status(3999, 4000, ""), + Some("[===========> ] 3999/4000".to_string()) + ); + assert_eq!( + format.progress_status(4000, 4000, ""), + Some("[=============] 4000/4000".to_string()) + ); + + assert_eq!( + format.progress_status(3, 4, ": short message"), + Some("[=============> ] 3/4: short message".to_string()) + ); + assert_eq!( + format.progress_status(3, 4, ": msg thats just fit"), + Some("[=============> ] 3/4: msg thats just fit".to_string()) + ); + assert_eq!( + format.progress_status(3, 4, ": msg that's just fit"), + Some("[=============> ] 3/4: msg that's just...".to_string()) + ); + + // combining diacritics have width zero and thus can fit max_width. 
+ let zalgo_msg = "z̸̧̢̗͉̝̦͍̱ͧͦͨ̑̅̌ͥ́͢a̢ͬͨ̽ͯ̅̑ͥ͋̏̑ͫ̄͢͏̫̝̪̤͎̱̣͍̭̞̙̱͙͍̘̭͚l̶̡̛̥̝̰̭̹̯̯̞̪͇̱̦͙͔̘̼͇͓̈ͨ͗ͧ̓͒ͦ̀̇ͣ̈ͭ͊͛̃̑͒̿̕͜g̸̷̢̩̻̻͚̠͓̞̥͐ͩ͌̑ͥ̊̽͋͐̐͌͛̐̇̑ͨ́ͅo͙̳̣͔̰̠̜͕͕̞̦̙̭̜̯̹̬̻̓͑ͦ͋̈̉͌̃ͯ̀̂͠ͅ ̸̡͎̦̲̖̤̺̜̮̱̰̥͔̯̅̏ͬ̂ͨ̋̃̽̈́̾̔̇ͣ̚͜͜h̡ͫ̐̅̿̍̀͜҉̛͇̭̹̰̠͙̞ẽ̶̙̹̳̖͉͎̦͂̋̓ͮ̔ͬ̐̀͂̌͑̒͆̚͜͠ ͓͓̟͍̮̬̝̝̰͓͎̼̻ͦ͐̾̔͒̃̓͟͟c̮̦͍̺͈͚̯͕̄̒͐̂͊̊͗͊ͤͣ̀͘̕͝͞o̶͍͚͍̣̮͌ͦ̽̑ͩ̅ͮ̐̽̏͗́͂̅ͪ͠m̷̧͖̻͔̥̪̭͉͉̤̻͖̩̤͖̘ͦ̂͌̆̂ͦ̒͊ͯͬ͊̉̌ͬ͝͡e̵̹̣͍̜̺̤̤̯̫̹̠̮͎͙̯͚̰̼͗͐̀̒͂̉̀̚͝͞s̵̲͍͙͖̪͓͓̺̱̭̩̣͖̣ͤͤ͂̎̈͗͆ͨͪ̆̈͗͝͠"; + assert_eq!( + format.progress_status(3, 4, zalgo_msg), + Some("[=============> ] 3/4".to_string() + zalgo_msg) + ); + + // some non-ASCII ellipsize test + assert_eq!( + format.progress_status(3, 4, "_123456789123456e\u{301}\u{301}8\u{301}90a"), + Some("[=============> ] 3/4_123456789123456e\u{301}\u{301}...".to_string()) + ); + assert_eq!( + format.progress_status(3, 4, ":每個漢字佔據了兩個字元"), + Some("[=============> ] 3/4:每個漢字佔據了...".to_string()) + ); +} + +#[test] +fn test_progress_status_percentage() { + let format = Format { + style: ProgressStyle::Percentage, + max_print: 40, + max_width: 60, + }; + assert_eq!( + format.progress_status(0, 77, ""), + Some("[ ] 0.00%".to_string()) + ); + assert_eq!( + format.progress_status(1, 77, ""), + Some("[ ] 1.30%".to_string()) + ); + assert_eq!( + format.progress_status(76, 77, ""), + Some("[=============> ] 98.70%".to_string()) + ); + assert_eq!( + format.progress_status(77, 77, ""), + Some("[===============] 100.00%".to_string()) + ); +} + +#[test] +fn test_progress_status_too_short() { + let format = Format { + style: ProgressStyle::Percentage, + max_print: 25, + max_width: 25, + }; + assert_eq!( + format.progress_status(1, 1, ""), + Some("[] 100.00%".to_string()) + ); + + let format = Format { + style: ProgressStyle::Percentage, + max_print: 24, + max_width: 24, + }; + assert_eq!(format.progress_status(1, 1, ""), None); +} diff --git a/src/cargo/util/read2.rs b/src/cargo/util/read2.rs new file mode 100644 index 000000000..bfa242797 --- /dev/null +++ b/src/cargo/util/read2.rs @@ -0,0 +1,179 @@ +pub use self::imp::read2; + +#[cfg(unix)] +mod imp { + use libc; + use std::io; + use std::io::prelude::*; + use std::mem; + use std::os::unix::prelude::*; + use std::process::{ChildStderr, ChildStdout}; + + pub fn read2( + mut out_pipe: ChildStdout, + mut err_pipe: ChildStderr, + data: &mut dyn FnMut(bool, &mut Vec, bool), + ) -> io::Result<()> { + unsafe { + libc::fcntl(out_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + libc::fcntl(err_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + } + + let mut out_done = false; + let mut err_done = false; + let mut out = Vec::new(); + let mut err = Vec::new(); + + let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() }; + fds[0].fd = out_pipe.as_raw_fd(); + fds[0].events = libc::POLLIN; + fds[1].fd = err_pipe.as_raw_fd(); + fds[1].events = libc::POLLIN; + let mut nfds = 2; + let mut errfd = 1; + + while nfds > 0 { + // wait for either pipe to become readable using `select` + let r = unsafe { libc::poll(fds.as_mut_ptr(), nfds, -1) }; + if r == -1 { + let err = io::Error::last_os_error(); + if err.kind() == io::ErrorKind::Interrupted { + continue; + } + return Err(err); + } + + // Read as much as we can from each pipe, ignoring EWOULDBLOCK or + // EAGAIN. If we hit EOF, then this will happen because the underlying + // reader will return Ok(0), in which case we'll see `Ok` ourselves. In + // this case we flip the other fd back into blocking mode and read + // whatever's leftover on that file descriptor. 
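+            // `handle` maps a read result onto "is this pipe finished?":
+            // an `Ok` from `read_to_end` means EOF was reached, while
+            // `WouldBlock` just means there is no more data right now.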
+ let handle = |res: io::Result<_>| match res { + Ok(_) => Ok(true), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + Ok(false) + } else { + Err(e) + } + } + }; + if !err_done && fds[errfd].revents != 0 && handle(err_pipe.read_to_end(&mut err))? { + err_done = true; + nfds -= 1; + } + data(false, &mut err, err_done); + if !out_done && fds[0].revents != 0 && handle(out_pipe.read_to_end(&mut out))? { + out_done = true; + fds[0].fd = err_pipe.as_raw_fd(); + errfd = 0; + nfds -= 1; + } + data(true, &mut out, out_done); + } + Ok(()) + } +} + +#[cfg(windows)] +mod imp { + use std::io; + use std::os::windows::prelude::*; + use std::process::{ChildStderr, ChildStdout}; + use std::slice; + + use miow::iocp::{CompletionPort, CompletionStatus}; + use miow::pipe::NamedPipe; + use miow::Overlapped; + use winapi::shared::winerror::ERROR_BROKEN_PIPE; + + struct Pipe<'a> { + dst: &'a mut Vec, + overlapped: Overlapped, + pipe: NamedPipe, + done: bool, + } + + pub fn read2( + out_pipe: ChildStdout, + err_pipe: ChildStderr, + data: &mut dyn FnMut(bool, &mut Vec, bool), + ) -> io::Result<()> { + let mut out = Vec::new(); + let mut err = Vec::new(); + + let port = CompletionPort::new(1)?; + port.add_handle(0, &out_pipe)?; + port.add_handle(1, &err_pipe)?; + + unsafe { + let mut out_pipe = Pipe::new(out_pipe, &mut out); + let mut err_pipe = Pipe::new(err_pipe, &mut err); + + out_pipe.read()?; + err_pipe.read()?; + + let mut status = [CompletionStatus::zero(), CompletionStatus::zero()]; + + while !out_pipe.done || !err_pipe.done { + for status in port.get_many(&mut status, None)? { + if status.token() == 0 { + out_pipe.complete(status); + data(true, out_pipe.dst, out_pipe.done); + out_pipe.read()?; + } else { + err_pipe.complete(status); + data(false, err_pipe.dst, err_pipe.done); + err_pipe.read()?; + } + } + } + + Ok(()) + } + } + + impl<'a> Pipe<'a> { + unsafe fn new(p: P, dst: &'a mut Vec) -> Pipe<'a> { + Pipe { + dst, + pipe: NamedPipe::from_raw_handle(p.into_raw_handle()), + overlapped: Overlapped::zero(), + done: false, + } + } + + unsafe fn read(&mut self) -> io::Result<()> { + let dst = slice_to_end(self.dst); + match self.pipe.read_overlapped(dst, self.overlapped.raw()) { + Ok(_) => Ok(()), + Err(e) => { + if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) { + self.done = true; + Ok(()) + } else { + Err(e) + } + } + } + } + + unsafe fn complete(&mut self, status: &CompletionStatus) { + let prev = self.dst.len(); + self.dst.set_len(prev + status.bytes_transferred() as usize); + if status.bytes_transferred() == 0 { + self.done = true; + } + } + } + + unsafe fn slice_to_end(v: &mut Vec) -> &mut [u8] { + if v.capacity() == 0 { + v.reserve(16); + } + if v.capacity() == v.len() { + v.reserve(1); + } + slice::from_raw_parts_mut(v.as_mut_ptr().add(v.len()), v.capacity() - v.len()) + } +} diff --git a/src/cargo/util/rustc.rs b/src/cargo/util/rustc.rs new file mode 100644 index 000000000..59fd23041 --- /dev/null +++ b/src/cargo/util/rustc.rs @@ -0,0 +1,278 @@ +#![allow(deprecated)] // for SipHasher + +use std::collections::hash_map::{Entry, HashMap}; +use std::env; +use std::hash::{Hash, Hasher, SipHasher}; +use std::path::{Path, PathBuf}; +use std::process::Stdio; +use std::sync::Mutex; + +use log::{debug, info, warn}; +use serde::{Deserialize, Serialize}; + +use crate::util::paths; +use crate::util::{self, internal, profile, CargoResult, ProcessBuilder}; + +/// Information on the `rustc` executable +#[derive(Debug)] +pub struct Rustc { + /// The location of the exe + pub path: PathBuf, + /// An 
optional program that will be passed the path of the rust exe as its first argument, and + /// rustc args following this. + pub wrapper: Option, + /// Verbose version information (the output of `rustc -vV`) + pub verbose_version: String, + /// The host triple (arch-platform-OS), this comes from verbose_version. + pub host: String, + cache: Mutex, +} + +impl Rustc { + /// Runs the compiler at `path` to learn various pieces of information about + /// it, with an optional wrapper. + /// + /// If successful this function returns a description of the compiler along + /// with a list of its capabilities. + pub fn new( + path: PathBuf, + wrapper: Option, + rustup_rustc: &Path, + cache_location: Option, + ) -> CargoResult { + let _p = profile::start("Rustc::new"); + + let mut cache = Cache::load(&path, rustup_rustc, cache_location); + + let mut cmd = util::process(&path); + cmd.arg("-vV"); + let verbose_version = cache.cached_output(&cmd)?.0; + + let host = { + let triple = verbose_version + .lines() + .find(|l| l.starts_with("host: ")) + .map(|l| &l[6..]) + .ok_or_else(|| internal("rustc -v didn't have a line for `host:`"))?; + triple.to_string() + }; + + Ok(Rustc { + path, + wrapper, + verbose_version, + host, + cache: Mutex::new(cache), + }) + } + + /// Gets a process builder set up to use the found rustc version, with a wrapper if `Some`. + pub fn process(&self) -> ProcessBuilder { + match self.wrapper { + Some(ref wrapper) if !wrapper.as_os_str().is_empty() => { + let mut cmd = util::process(wrapper); + cmd.arg(&self.path); + cmd + } + _ => self.process_no_wrapper(), + } + } + + pub fn process_no_wrapper(&self) -> ProcessBuilder { + util::process(&self.path) + } + + pub fn cached_output(&self, cmd: &ProcessBuilder) -> CargoResult<(String, String)> { + self.cache.lock().unwrap().cached_output(cmd) + } + + pub fn cached_success(&self, cmd: &ProcessBuilder) -> CargoResult { + self.cache.lock().unwrap().cached_success(cmd) + } +} + +/// It is a well known that `rustc` is not the fastest compiler in the world. +/// What is less known is that even `rustc --version --verbose` takes about a +/// hundred milliseconds! Because we need compiler version info even for no-op +/// builds, we cache it here, based on compiler's mtime and rustup's current +/// toolchain. +/// +/// https://github.com/rust-lang/cargo/issues/5315 +/// https://github.com/rust-lang/rust/issues/49761 +#[derive(Debug)] +struct Cache { + cache_location: Option, + dirty: bool, + data: CacheData, +} + +#[derive(Serialize, Deserialize, Debug, Default)] +struct CacheData { + rustc_fingerprint: u64, + outputs: HashMap, + successes: HashMap, +} + +impl Cache { + fn load(rustc: &Path, rustup_rustc: &Path, cache_location: Option) -> Cache { + match (cache_location, rustc_fingerprint(rustc, rustup_rustc)) { + (Some(cache_location), Ok(rustc_fingerprint)) => { + let empty = CacheData { + rustc_fingerprint, + outputs: HashMap::new(), + successes: HashMap::new(), + }; + let mut dirty = true; + let data = match read(&cache_location) { + Ok(data) => { + if data.rustc_fingerprint == rustc_fingerprint { + info!("reusing existing rustc info cache"); + dirty = false; + data + } else { + info!("different compiler, creating new rustc info cache"); + empty + } + } + Err(e) => { + info!("failed to read rustc info cache: {}", e); + empty + } + }; + return Cache { + cache_location: Some(cache_location), + dirty, + data, + }; + + fn read(path: &Path) -> CargoResult { + let json = paths::read(path)?; + Ok(serde_json::from_str(&json)?) 
+ } + } + (_, fingerprint) => { + if let Err(e) = fingerprint { + warn!("failed to calculate rustc fingerprint: {}", e); + } + info!("rustc info cache disabled"); + Cache { + cache_location: None, + dirty: false, + data: CacheData::default(), + } + } + } + } + + fn cached_output(&mut self, cmd: &ProcessBuilder) -> CargoResult<(String, String)> { + let key = process_fingerprint(cmd); + match self.data.outputs.entry(key) { + Entry::Occupied(entry) => { + info!("rustc info cache hit"); + Ok(entry.get().clone()) + } + Entry::Vacant(entry) => { + info!("rustc info cache miss"); + let output = cmd.exec_with_output()?; + let stdout = String::from_utf8(output.stdout) + .map_err(|_| internal("rustc didn't return utf8 output"))?; + let stderr = String::from_utf8(output.stderr) + .map_err(|_| internal("rustc didn't return utf8 output"))?; + let output = (stdout, stderr); + entry.insert(output.clone()); + self.dirty = true; + Ok(output) + } + } + } + + fn cached_success(&mut self, cmd: &ProcessBuilder) -> CargoResult { + let key = process_fingerprint(cmd); + match self.data.successes.entry(key) { + Entry::Occupied(entry) => { + info!("rustc info cache hit"); + Ok(*entry.get()) + } + Entry::Vacant(entry) => { + info!("rustc info cache miss"); + let success = cmd + .build_command() + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status()? + .success(); + entry.insert(success); + self.dirty = true; + Ok(success) + } + } + } +} + +impl Drop for Cache { + fn drop(&mut self) { + if !self.dirty { + return; + } + if let Some(ref path) = self.cache_location { + let json = serde_json::to_string(&self.data).unwrap(); + match paths::write(path, json.as_bytes()) { + Ok(()) => info!("updated rustc info cache"), + Err(e) => warn!("failed to update rustc info cache: {}", e), + } + } + } +} + +fn rustc_fingerprint(path: &Path, rustup_rustc: &Path) -> CargoResult { + let mut hasher = SipHasher::new_with_keys(0, 0); + + let path = paths::resolve_executable(path)?; + path.hash(&mut hasher); + + paths::mtime(&path)?.hash(&mut hasher); + + // Rustup can change the effective compiler without touching + // the `rustc` binary, so we try to account for this here. + // If we see rustup's env vars, we mix them into the fingerprint, + // but we also mix in the mtime of the actual compiler (and not + // the rustup shim at `~/.cargo/bin/rustup`), because `RUSTUP_TOOLCHAIN` + // could be just `stable-x86_64-unknown-linux-gnu`, i.e, it could + // not mention the version of Rust at all, which changes after + // `rustup update`. + // + // If we don't see rustup env vars, but it looks like the compiler + // is managed by rustup, we conservatively bail out. 
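+    // Bailing out here merely produces an error fingerprint, which
+    // `Cache::load` treats as "disable the cache", not a hard failure.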
+ let maybe_rustup = rustup_rustc == path; + match ( + maybe_rustup, + env::var("RUSTUP_HOME"), + env::var("RUSTUP_TOOLCHAIN"), + ) { + (_, Ok(rustup_home), Ok(rustup_toolchain)) => { + debug!("adding rustup info to rustc fingerprint"); + rustup_toolchain.hash(&mut hasher); + rustup_home.hash(&mut hasher); + let real_rustc = Path::new(&rustup_home) + .join("toolchains") + .join(rustup_toolchain) + .join("bin") + .join("rustc") + .with_extension(env::consts::EXE_EXTENSION); + paths::mtime(&real_rustc)?.hash(&mut hasher); + } + (true, _, _) => failure::bail!("probably rustup rustc, but without rustup's env vars"), + _ => (), + } + + Ok(hasher.finish()) +} + +fn process_fingerprint(cmd: &ProcessBuilder) -> u64 { + let mut hasher = SipHasher::new_with_keys(0, 0); + cmd.get_args().hash(&mut hasher); + let mut env = cmd.get_envs().iter().collect::>(); + env.sort_unstable(); + env.hash(&mut hasher); + hasher.finish() +} diff --git a/src/cargo/util/sha256.rs b/src/cargo/util/sha256.rs new file mode 100644 index 000000000..32d04ae93 --- /dev/null +++ b/src/cargo/util/sha256.rs @@ -0,0 +1,29 @@ +use self::crypto_hash::{Algorithm, Hasher}; +use crypto_hash; +use std::io::Write; + +pub struct Sha256(Hasher); + +impl Sha256 { + pub fn new() -> Sha256 { + let hasher = Hasher::new(Algorithm::SHA256); + Sha256(hasher) + } + + pub fn update(&mut self, bytes: &[u8]) { + let _ = self.0.write_all(bytes); + } + + pub fn finish(&mut self) -> [u8; 32] { + let mut ret = [0u8; 32]; + let data = self.0.finish(); + ret.copy_from_slice(&data[..]); + ret + } +} + +impl Default for Sha256 { + fn default() -> Self { + Self::new() + } +} diff --git a/src/cargo/util/to_semver.rs b/src/cargo/util/to_semver.rs new file mode 100644 index 000000000..65cc078fd --- /dev/null +++ b/src/cargo/util/to_semver.rs @@ -0,0 +1,33 @@ +use crate::util::errors::CargoResult; +use semver::Version; + +pub trait ToSemver { + fn to_semver(self) -> CargoResult; +} + +impl ToSemver for Version { + fn to_semver(self) -> CargoResult { + Ok(self) + } +} + +impl<'a> ToSemver for &'a str { + fn to_semver(self) -> CargoResult { + match Version::parse(self) { + Ok(v) => Ok(v), + Err(..) 
=> Err(failure::format_err!("cannot parse '{}' as a semver", self)), + } + } +} + +impl<'a> ToSemver for &'a String { + fn to_semver(self) -> CargoResult { + (**self).to_semver() + } +} + +impl<'a> ToSemver for &'a Version { + fn to_semver(self) -> CargoResult { + Ok(self.clone()) + } +} diff --git a/src/cargo/util/to_url.rs b/src/cargo/util/to_url.rs new file mode 100644 index 000000000..e354c0401 --- /dev/null +++ b/src/cargo/util/to_url.rs @@ -0,0 +1,24 @@ +use std::path::Path; + +use url::Url; + +use crate::util::CargoResult; + +/// A type that can be converted to a Url +pub trait ToUrl { + /// Performs the conversion + fn to_url(self) -> CargoResult; +} + +impl<'a> ToUrl for &'a str { + fn to_url(self) -> CargoResult { + Url::parse(self).map_err(|s| failure::format_err!("invalid url `{}`: {}", self, s)) + } +} + +impl<'a> ToUrl for &'a Path { + fn to_url(self) -> CargoResult { + Url::from_file_path(self) + .map_err(|()| failure::format_err!("invalid path url `{}`", self.display())) + } +} diff --git a/src/cargo/util/toml/mod.rs b/src/cargo/util/toml/mod.rs new file mode 100644 index 000000000..eb1354bf2 --- /dev/null +++ b/src/cargo/util/toml/mod.rs @@ -0,0 +1,1523 @@ +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::fmt; +use std::fs; +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::str; + +use log::{debug, trace}; +use semver::{self, VersionReq}; +use serde::de; +use serde::ser; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::core::dependency::{Kind, Platform}; +use crate::core::manifest::{LibKind, ManifestMetadata, TargetSourcePath, Warnings}; +use crate::core::profiles::Profiles; +use crate::core::{Dependency, Manifest, PackageId, Summary, Target}; +use crate::core::{Edition, EitherManifest, Feature, Features, VirtualManifest}; +use crate::core::{GitReference, PackageIdSpec, SourceId, WorkspaceConfig, WorkspaceRootConfig}; +use crate::sources::{CRATES_IO_INDEX, CRATES_IO_REGISTRY}; +use crate::util::errors::{CargoResult, CargoResultExt, ManifestError}; +use crate::util::paths; +use crate::util::{self, validate_package_name, Config, ToUrl}; + +mod targets; +use self::targets::targets; + +pub fn read_manifest( + path: &Path, + source_id: SourceId, + config: &Config, +) -> Result<(EitherManifest, Vec), ManifestError> { + trace!( + "read_manifest; path={}; source-id={}", + path.display(), + source_id + ); + let contents = paths::read(path).map_err(|err| ManifestError::new(err, path.into()))?; + + do_read_manifest(&contents, path, source_id, config) + .chain_err(|| format!("failed to parse manifest at `{}`", path.display())) + .map_err(|err| ManifestError::new(err, path.into())) +} + +fn do_read_manifest( + contents: &str, + manifest_file: &Path, + source_id: SourceId, + config: &Config, +) -> CargoResult<(EitherManifest, Vec)> { + let package_root = manifest_file.parent().unwrap(); + + let toml = { + let pretty_filename = + manifest_file.strip_prefix(config.cwd()).unwrap_or(manifest_file); + parse(contents, pretty_filename, config)? 
+ }; + + let mut unused = BTreeSet::new(); + let manifest: TomlManifest = serde_ignored::deserialize(toml, |path| { + let mut key = String::new(); + stringify(&mut key, &path); + unused.insert(key); + })?; + let add_unused = |warnings: &mut Warnings| { + for key in unused { + warnings.add_warning(format!("unused manifest key: {}", key)); + if key == "profile.debug" || key == "profiles.debug" { + warnings.add_warning("use `[profile.dev]` to configure debug builds".to_string()); + } + } + }; + + let manifest = Rc::new(manifest); + return if manifest.project.is_some() || manifest.package.is_some() { + let (mut manifest, paths) = + TomlManifest::to_real_manifest(&manifest, source_id, package_root, config)?; + add_unused(manifest.warnings_mut()); + if !manifest.targets().iter().any(|t| !t.is_custom_build()) { + failure::bail!( + "no targets specified in the manifest\n \ + either src/lib.rs, src/main.rs, a [lib] section, or \ + [[bin]] section must be present" + ) + } + Ok((EitherManifest::Real(manifest), paths)) + } else { + let (mut m, paths) = + TomlManifest::to_virtual_manifest(&manifest, source_id, package_root, config)?; + add_unused(m.warnings_mut()); + Ok((EitherManifest::Virtual(m), paths)) + }; + + fn stringify(dst: &mut String, path: &serde_ignored::Path<'_>) { + use serde_ignored::Path; + + match *path { + Path::Root => {} + Path::Seq { parent, index } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(&index.to_string()); + } + Path::Map { parent, ref key } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(key); + } + Path::Some { parent } + | Path::NewtypeVariant { parent } + | Path::NewtypeStruct { parent } => stringify(dst, parent), + } + } +} + +pub fn parse(toml: &str, file: &Path, config: &Config) -> CargoResult { + let first_error = match toml.parse() { + Ok(ret) => return Ok(ret), + Err(e) => e, + }; + + let mut second_parser = toml::de::Deserializer::new(toml); + second_parser.set_require_newline_after_table(false); + if let Ok(ret) = toml::Value::deserialize(&mut second_parser) { + let msg = format!( + "\ +TOML file found which contains invalid syntax and will soon not parse +at `{}`. + +The TOML spec requires newlines after table definitions (e.g., `[a] b = 1` is +invalid), but this file has a table header which does not have a newline after +it. 
A newline needs to be added and this warning will soon become a hard error +in the future.", + file.display() + ); + config.shell().warn(&msg)?; + return Ok(ret); + } + + let first_error = failure::Error::from(first_error); + Err(first_error.context("could not parse input as TOML").into()) +} + +type TomlLibTarget = TomlTarget; +type TomlBinTarget = TomlTarget; +type TomlExampleTarget = TomlTarget; +type TomlTestTarget = TomlTarget; +type TomlBenchTarget = TomlTarget; + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum TomlDependency { + Simple(String), + Detailed(DetailedTomlDependency), +} + +impl<'de> de::Deserialize<'de> for TomlDependency { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct TomlDependencyVisitor; + + impl<'de> de::Visitor<'de> for TomlDependencyVisitor { + type Value = TomlDependency; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str( + "a version string like \"0.9.8\" or a \ + detailed dependency like { version = \"0.9.8\" }", + ) + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(TomlDependency::Simple(s.to_owned())) + } + + fn visit_map(self, map: V) -> Result + where + V: de::MapAccess<'de>, + { + let mvd = de::value::MapAccessDeserializer::new(map); + DetailedTomlDependency::deserialize(mvd).map(TomlDependency::Detailed) + } + } + + deserializer.deserialize_any(TomlDependencyVisitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +#[serde(rename_all = "kebab-case")] +pub struct DetailedTomlDependency { + version: Option, + registry: Option, + /// The URL of the `registry` field. + /// This is an internal implementation detail. When Cargo creates a + /// package, it replaces `registry` with `registry-index` so that the + /// manifest contains the correct URL. All users won't have the same + /// registry names configured, so Cargo can't rely on just the name for + /// crates published by other users. 
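+    /// (For example, a hypothetical `registry = "my-registry"` entry is
+    /// rewritten to `registry-index = "<my-registry's index URL>"` when
+    /// packaging; see `map_dependency` below.)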
+ registry_index: Option, + path: Option, + git: Option, + branch: Option, + tag: Option, + rev: Option, + features: Option>, + optional: Option, + default_features: Option, + #[serde(rename = "default_features")] + default_features2: Option, + package: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct TomlManifest { + cargo_features: Option>, + package: Option>, + project: Option>, + profile: Option, + lib: Option, + bin: Option>, + example: Option>, + test: Option>, + bench: Option>, + dependencies: Option>, + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + features: Option>>, + target: Option>, + replace: Option>, + patch: Option>>, + workspace: Option, + badges: Option>>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +pub struct TomlProfiles { + pub test: Option, + pub doc: Option, + pub bench: Option, + pub dev: Option, + pub release: Option, +} + +impl TomlProfiles { + pub fn validate(&self, features: &Features, warnings: &mut Vec) -> CargoResult<()> { + if let Some(ref test) = self.test { + test.validate("test", features, warnings)?; + } + if let Some(ref doc) = self.doc { + doc.validate("doc", features, warnings)?; + } + if let Some(ref bench) = self.bench { + bench.validate("bench", features, warnings)?; + } + if let Some(ref dev) = self.dev { + dev.validate("dev", features, warnings)?; + } + if let Some(ref release) = self.release { + release.validate("release", features, warnings)?; + } + Ok(()) + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct TomlOptLevel(pub String); + +impl<'de> de::Deserialize<'de> for TomlOptLevel { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = TomlOptLevel; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("an optimization level") + } + + fn visit_i64(self, value: i64) -> Result + where + E: de::Error, + { + Ok(TomlOptLevel(value.to_string())) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if value == "s" || value == "z" { + Ok(TomlOptLevel(value.to_string())) + } else { + Err(E::custom(format!( + "must be an integer, `z`, or `s`, \ + but found: {}", + value + ))) + } + } + } + + d.deserialize_any(Visitor) + } +} + +impl ser::Serialize for TomlOptLevel { + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + match self.0.parse::() { + Ok(n) => n.serialize(serializer), + Err(_) => self.0.serialize(serializer), + } + } +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +#[serde(untagged)] +pub enum U32OrBool { + U32(u32), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for U32OrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = U32OrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a boolean or an integer") + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(U32OrBool::Bool(b)) + } + + fn visit_i64(self, u: i64) -> Result + where + E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_u64(self, u: u64) -> Result + where + E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + } + + 
deserializer.deserialize_any(Visitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, Eq, PartialEq)] +#[serde(rename_all = "kebab-case")] +pub struct TomlProfile { + pub opt_level: Option, + pub lto: Option, + pub codegen_units: Option, + pub debug: Option, + pub debug_assertions: Option, + pub rpath: Option, + pub panic: Option, + pub overflow_checks: Option, + pub incremental: Option, + pub overrides: Option>, + pub build_override: Option>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub enum ProfilePackageSpec { + Spec(PackageIdSpec), + All, +} + +impl ser::Serialize for ProfilePackageSpec { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + match *self { + ProfilePackageSpec::Spec(ref spec) => spec.serialize(s), + ProfilePackageSpec::All => "*".serialize(s), + } + } +} + +impl<'de> de::Deserialize<'de> for ProfilePackageSpec { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + if string == "*" { + Ok(ProfilePackageSpec::All) + } else { + PackageIdSpec::parse(&string) + .map_err(de::Error::custom) + .map(ProfilePackageSpec::Spec) + } + } +} + +impl TomlProfile { + pub fn validate( + &self, + name: &str, + features: &Features, + warnings: &mut Vec, + ) -> CargoResult<()> { + if let Some(ref profile) = self.build_override { + features.require(Feature::profile_overrides())?; + profile.validate_override()?; + } + if let Some(ref override_map) = self.overrides { + features.require(Feature::profile_overrides())?; + for profile in override_map.values() { + profile.validate_override()?; + } + } + + match name { + "dev" | "release" => {} + _ => { + if self.overrides.is_some() || self.build_override.is_some() { + failure::bail!( + "Profile overrides may only be specified for \ + `dev` or `release` profile, not `{}`.", + name + ); + } + } + } + + match name { + "doc" => { + warnings.push("profile `doc` is deprecated and has no effect".to_string()); + } + "test" | "bench" => { + if self.panic.is_some() { + warnings.push(format!("`panic` setting is ignored for `{}` profile", name)) + } + } + _ => {} + } + Ok(()) + } + + fn validate_override(&self) -> CargoResult<()> { + if self.overrides.is_some() || self.build_override.is_some() { + failure::bail!("Profile overrides cannot be nested."); + } + if self.panic.is_some() { + failure::bail!("`panic` may not be specified in a profile override.") + } + if self.lto.is_some() { + failure::bail!("`lto` may not be specified in a profile override.") + } + if self.rpath.is_some() { + failure::bail!("`rpath` may not be specified in a profile override.") + } + Ok(()) + } +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +pub struct StringOrVec(Vec); + +impl<'de> de::Deserialize<'de> for StringOrVec { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = StringOrVec; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("string or list of strings") + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(StringOrVec(vec![s.to_string()])) + } + + fn visit_seq(self, v: V) -> Result + where + V: de::SeqAccess<'de>, + { + let seq = de::value::SeqAccessDeserializer::new(v); + Vec::deserialize(seq).map(StringOrVec) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +#[serde(untagged)] +pub enum StringOrBool 
{ + String(String), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for StringOrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = StringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a boolean or a string") + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(StringOrBool::Bool(b)) + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(StringOrBool::String(s.to_string())) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum VecStringOrBool { + VecString(Vec), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for VecStringOrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = VecStringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a boolean or vector of strings") + } + + fn visit_seq(self, v: V) -> Result + where + V: de::SeqAccess<'de>, + { + let seq = de::value::SeqAccessDeserializer::new(v); + Vec::deserialize(seq).map(VecStringOrBool::VecString) + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(VecStringOrBool::Bool(b)) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +/// Represents the `package`/`project` sections of a `Cargo.toml`. +/// +/// Note that the order of the fields matters, since this is the order they +/// are serialized to a TOML file. For example, you cannot have values after +/// the field `metadata`, since it is a table and values cannot appear after +/// tables. +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct TomlProject { + edition: Option, + name: String, + version: semver::Version, + authors: Option>, + build: Option, + metabuild: Option, + links: Option, + exclude: Option>, + include: Option>, + publish: Option, + #[serde(rename = "publish-lockfile")] + publish_lockfile: Option, + workspace: Option, + #[serde(rename = "im-a-teapot")] + im_a_teapot: Option, + autobins: Option, + autoexamples: Option, + autotests: Option, + autobenches: Option, + #[serde(rename = "namespaced-features")] + namespaced_features: Option, + #[serde(rename = "default-run")] + default_run: Option, + + // Package metadata. 
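+    // These fields are copied into `ManifestMetadata` by
+    // `to_real_manifest`.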
+ description: Option, + homepage: Option, + documentation: Option, + readme: Option, + keywords: Option>, + categories: Option>, + license: Option, + #[serde(rename = "license-file")] + license_file: Option, + repository: Option, + metadata: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct TomlWorkspace { + members: Option>, + #[serde(rename = "default-members")] + default_members: Option>, + exclude: Option>, +} + +impl TomlProject { + pub fn to_package_id(&self, source_id: SourceId) -> CargoResult { + PackageId::new(&self.name, self.version.clone(), source_id) + } +} + +struct Context<'a, 'b> { + pkgid: Option, + deps: &'a mut Vec, + source_id: SourceId, + nested_paths: &'a mut Vec, + config: &'b Config, + warnings: &'a mut Vec, + platform: Option, + root: &'a Path, + features: &'a Features, +} + +impl TomlManifest { + pub fn prepare_for_publish(&self, config: &Config) -> CargoResult { + let mut package = self + .package + .as_ref() + .or_else(|| self.project.as_ref()) + .unwrap() + .clone(); + package.workspace = None; + return Ok(TomlManifest { + package: Some(package), + project: None, + profile: self.profile.clone(), + lib: self.lib.clone(), + bin: self.bin.clone(), + example: self.example.clone(), + test: self.test.clone(), + bench: self.bench.clone(), + dependencies: map_deps(config, self.dependencies.as_ref())?, + dev_dependencies: map_deps( + config, + self.dev_dependencies + .as_ref() + .or_else(|| self.dev_dependencies2.as_ref()), + )?, + dev_dependencies2: None, + build_dependencies: map_deps( + config, + self.build_dependencies + .as_ref() + .or_else(|| self.build_dependencies2.as_ref()), + )?, + build_dependencies2: None, + features: self.features.clone(), + target: match self.target.as_ref().map(|target_map| { + target_map + .iter() + .map(|(k, v)| { + Ok(( + k.clone(), + TomlPlatform { + dependencies: map_deps(config, v.dependencies.as_ref())?, + dev_dependencies: map_deps( + config, + v.dev_dependencies + .as_ref() + .or_else(|| v.dev_dependencies2.as_ref()), + )?, + dev_dependencies2: None, + build_dependencies: map_deps( + config, + v.build_dependencies + .as_ref() + .or_else(|| v.build_dependencies2.as_ref()), + )?, + build_dependencies2: None, + }, + )) + }) + .collect() + }) { + Some(Ok(v)) => Some(v), + Some(Err(e)) => return Err(e), + None => None, + }, + replace: None, + patch: None, + workspace: None, + badges: self.badges.clone(), + cargo_features: self.cargo_features.clone(), + }); + + fn map_deps( + config: &Config, + deps: Option<&BTreeMap>, + ) -> CargoResult>> { + let deps = match deps { + Some(deps) => deps, + None => return Ok(None), + }; + let deps = deps + .iter() + .map(|(k, v)| Ok((k.clone(), map_dependency(config, v)?))) + .collect::>>()?; + Ok(Some(deps)) + } + + fn map_dependency(config: &Config, dep: &TomlDependency) -> CargoResult { + match *dep { + TomlDependency::Detailed(ref d) => { + let mut d = d.clone(); + d.path.take(); // path dependencies become crates.io deps + // registry specifications are elaborated to the index URL + if let Some(registry) = d.registry.take() { + let src = SourceId::alt_registry(config, ®istry)?; + d.registry_index = Some(src.url().to_string()); + } + Ok(TomlDependency::Detailed(d)) + } + TomlDependency::Simple(ref s) => { + Ok(TomlDependency::Detailed(DetailedTomlDependency { + version: Some(s.clone()), + ..Default::default() + })) + } + } + } + } + + fn to_real_manifest( + me: &Rc, + source_id: SourceId, + package_root: &Path, + config: &Config, + ) -> CargoResult<(Manifest, Vec)> { + let mut 
nested_paths = vec![]; + let mut warnings = vec![]; + let mut errors = vec![]; + + // Parse features first so they will be available when parsing other parts of the TOML. + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(&cargo_features, &mut warnings)?; + + let project = me.project.as_ref().or_else(|| me.package.as_ref()); + let project = project.ok_or_else(|| failure::format_err!("no `package` section found"))?; + + let package_name = project.name.trim(); + if package_name.is_empty() { + failure::bail!("package name cannot be an empty string") + } + + validate_package_name(package_name, "package name", "")?; + + let pkgid = project.to_package_id(source_id)?; + + let edition = if let Some(ref edition) = project.edition { + features + .require(Feature::edition()) + .chain_err(|| "editions are unstable")?; + edition + .parse() + .chain_err(|| "failed to parse the `edition` key")? + } else { + Edition::Edition2015 + }; + + if project.metabuild.is_some() { + features.require(Feature::metabuild())?; + } + + // If we have no lib at all, use the inferred lib, if available. + // If we have a lib with a path, we're done. + // If we have a lib with no path, use the inferred lib or else the package name. + let targets = targets( + &features, + me, + package_name, + package_root, + edition, + &project.build, + &project.metabuild, + &mut warnings, + &mut errors, + )?; + + if targets.is_empty() { + debug!("manifest has no build targets"); + } + + if let Err(e) = unique_build_targets(&targets, package_root) { + warnings.push(format!( + "file found to be present in multiple \ + build targets: {}", + e + )); + } + + let mut deps = Vec::new(); + let replace; + let patch; + + { + let mut cx = Context { + pkgid: Some(pkgid), + deps: &mut deps, + source_id, + nested_paths: &mut nested_paths, + config, + warnings: &mut warnings, + features: &features, + platform: None, + root: package_root, + }; + + fn process_dependencies( + cx: &mut Context<'_, '_>, + new_deps: Option<&BTreeMap>, + kind: Option, + ) -> CargoResult<()> { + let dependencies = match new_deps { + Some(dependencies) => dependencies, + None => return Ok(()), + }; + for (n, v) in dependencies.iter() { + let dep = v.to_dependency(n, cx, kind)?; + cx.deps.push(dep); + } + + Ok(()) + } + + // Collect the dependencies. 
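+            // First the plain `[dependencies]`, `[dev-dependencies]`, and
+            // `[build-dependencies]` tables (including their underscored
+            // spellings), then the same three tables per `[target.*]` entry.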
+ process_dependencies(&mut cx, me.dependencies.as_ref(), None)?; + let dev_deps = me + .dev_dependencies + .as_ref() + .or_else(|| me.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + let build_deps = me + .build_dependencies + .as_ref() + .or_else(|| me.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + + for (name, platform) in me.target.iter().flat_map(|t| t) { + cx.platform = Some(name.parse()?); + process_dependencies(&mut cx, platform.dependencies.as_ref(), None)?; + let build_deps = platform + .build_dependencies + .as_ref() + .or_else(|| platform.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + let dev_deps = platform + .dev_dependencies + .as_ref() + .or_else(|| platform.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + } + + replace = me.replace(&mut cx)?; + patch = me.patch(&mut cx)?; + } + + { + let mut names_sources = BTreeMap::new(); + for dep in &deps { + let name = dep.name_in_toml(); + let prev = names_sources.insert(name.to_string(), dep.source_id()); + if prev.is_some() && prev != Some(dep.source_id()) { + failure::bail!( + "Dependency '{}' has different source paths depending on the build \ + target. Each dependency must have a single canonical source path \ + irrespective of build target.", + name + ); + } + } + } + + let exclude = project.exclude.clone().unwrap_or_default(); + let include = project.include.clone().unwrap_or_default(); + if project.namespaced_features.is_some() { + features.require(Feature::namespaced_features())?; + } + + let summary = Summary::new( + pkgid, + deps, + &me.features + .as_ref() + .map(|x| { + x.iter() + .map(|(k, v)| (k.as_str(), v.iter().collect())) + .collect() + }) + .unwrap_or_else(BTreeMap::new), + project.links.as_ref().map(|x| x.as_str()), + project.namespaced_features.unwrap_or(false), + )?; + let metadata = ManifestMetadata { + description: project.description.clone(), + homepage: project.homepage.clone(), + documentation: project.documentation.clone(), + readme: project.readme.clone(), + authors: project.authors.clone().unwrap_or_default(), + license: project.license.clone(), + license_file: project.license_file.clone(), + repository: project.repository.clone(), + keywords: project.keywords.clone().unwrap_or_default(), + categories: project.categories.clone().unwrap_or_default(), + badges: me.badges.clone().unwrap_or_default(), + links: project.links.clone(), + }; + + let workspace_config = match (me.workspace.as_ref(), project.workspace.as_ref()) { + (Some(config), None) => WorkspaceConfig::Root(WorkspaceRootConfig::new( + &package_root, + &config.members, + &config.default_members, + &config.exclude, + )), + (None, root) => WorkspaceConfig::Member { + root: root.cloned(), + }, + (Some(..), Some(..)) => failure::bail!( + "cannot configure both `package.workspace` and \ + `[workspace]`, only one can be specified" + ), + }; + let profiles = Profiles::new(me.profile.as_ref(), config, &features, &mut warnings)?; + let publish = match project.publish { + Some(VecStringOrBool::VecString(ref vecstring)) => Some(vecstring.clone()), + Some(VecStringOrBool::Bool(false)) => Some(vec![]), + None | Some(VecStringOrBool::Bool(true)) => None, + }; + + let publish_lockfile = match project.publish_lockfile { + Some(b) => { + features.require(Feature::publish_lockfile())?; + b + } + None => false, + }; + + if 
summary.features().contains_key("default-features") { + warnings.push( + "`default-features = [\"..\"]` was found in [features]. \ + Did you mean to use `default = [\"..\"]`?" + .to_string(), + ) + } + + let custom_metadata = project.metadata.clone(); + let mut manifest = Manifest::new( + summary, + targets, + exclude, + include, + project.links.clone(), + metadata, + custom_metadata, + profiles, + publish, + publish_lockfile, + replace, + patch, + workspace_config, + features, + edition, + project.im_a_teapot, + project.default_run.clone(), + Rc::clone(me), + project.metabuild.clone().map(|sov| sov.0), + ); + if project.license_file.is_some() && project.license.is_some() { + manifest.warnings_mut().add_warning( + "only one of `license` or \ + `license-file` is necessary" + .to_string(), + ); + } + for warning in warnings { + manifest.warnings_mut().add_warning(warning); + } + for error in errors { + manifest.warnings_mut().add_critical_warning(error); + } + + manifest.feature_gate()?; + + Ok((manifest, nested_paths)) + } + + fn to_virtual_manifest( + me: &Rc, + source_id: SourceId, + root: &Path, + config: &Config, + ) -> CargoResult<(VirtualManifest, Vec)> { + if me.project.is_some() { + failure::bail!("virtual manifests do not define [project]"); + } + if me.package.is_some() { + failure::bail!("virtual manifests do not define [package]"); + } + if me.lib.is_some() { + failure::bail!("virtual manifests do not specify [lib]"); + } + if me.bin.is_some() { + failure::bail!("virtual manifests do not specify [[bin]]"); + } + if me.example.is_some() { + failure::bail!("virtual manifests do not specify [[example]]"); + } + if me.test.is_some() { + failure::bail!("virtual manifests do not specify [[test]]"); + } + if me.bench.is_some() { + failure::bail!("virtual manifests do not specify [[bench]]"); + } + if me.dependencies.is_some() { + failure::bail!("virtual manifests do not specify [dependencies]"); + } + if me.dev_dependencies.is_some() || me.dev_dependencies2.is_some() { + failure::bail!("virtual manifests do not specify [dev-dependencies]"); + } + if me.build_dependencies.is_some() || me.build_dependencies2.is_some() { + failure::bail!("virtual manifests do not specify [build-dependencies]"); + } + if me.features.is_some() { + failure::bail!("virtual manifests do not specify [features]"); + } + if me.target.is_some() { + failure::bail!("virtual manifests do not specify [target]"); + } + if me.badges.is_some() { + failure::bail!("virtual manifests do not specify [badges]"); + } + + let mut nested_paths = Vec::new(); + let mut warnings = Vec::new(); + let mut deps = Vec::new(); + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(&cargo_features, &mut warnings)?; + + let (replace, patch) = { + let mut cx = Context { + pkgid: None, + deps: &mut deps, + source_id, + nested_paths: &mut nested_paths, + config, + warnings: &mut warnings, + platform: None, + features: &features, + root, + }; + (me.replace(&mut cx)?, me.patch(&mut cx)?) 
+ }; + let profiles = Profiles::new(me.profile.as_ref(), config, &features, &mut warnings)?; + let workspace_config = match me.workspace { + Some(ref config) => WorkspaceConfig::Root(WorkspaceRootConfig::new( + &root, + &config.members, + &config.default_members, + &config.exclude, + )), + None => { + failure::bail!("virtual manifests must be configured with [workspace]"); + } + }; + Ok(( + VirtualManifest::new(replace, patch, workspace_config, profiles, features), + nested_paths, + )) + } + + fn replace(&self, cx: &mut Context<'_, '_>) -> CargoResult> { + if self.patch.is_some() && self.replace.is_some() { + failure::bail!("cannot specify both [replace] and [patch]"); + } + let mut replace = Vec::new(); + for (spec, replacement) in self.replace.iter().flat_map(|x| x) { + let mut spec = PackageIdSpec::parse(spec).chain_err(|| { + format!( + "replacements must specify a valid semver \ + version to replace, but `{}` does not", + spec + ) + })?; + if spec.url().is_none() { + spec.set_url(CRATES_IO_INDEX.parse().unwrap()); + } + + let version_specified = match *replacement { + TomlDependency::Detailed(ref d) => d.version.is_some(), + TomlDependency::Simple(..) => true, + }; + if version_specified { + failure::bail!( + "replacements cannot specify a version \ + requirement, but found one for `{}`", + spec + ); + } + + let mut dep = replacement.to_dependency(spec.name(), cx, None)?; + { + let version = spec.version().ok_or_else(|| { + failure::format_err!( + "replacements must specify a version \ + to replace, but `{}` does not", + spec + ) + })?; + dep.set_version_req(VersionReq::exact(version)); + } + replace.push((spec, dep)); + } + Ok(replace) + } + + fn patch(&self, cx: &mut Context<'_, '_>) -> CargoResult>> { + let mut patch = HashMap::new(); + for (url, deps) in self.patch.iter().flat_map(|x| x) { + let url = match &url[..] { + CRATES_IO_REGISTRY => CRATES_IO_INDEX.parse().unwrap(), + _ => cx + .config + .get_registry_index(url) + .or_else(|_| url.to_url()) + .chain_err(|| { + format!("[patch] entry `{}` should be a URL or registry name", url) + })?, + }; + patch.insert( + url, + deps.iter() + .map(|(name, dep)| dep.to_dependency(name, cx, None)) + .collect::>>()?, + ); + } + Ok(patch) + } + + fn maybe_custom_build( + &self, + build: &Option, + package_root: &Path, + ) -> Option { + let build_rs = package_root.join("build.rs"); + match *build { + // Explicitly no build script. + Some(StringOrBool::Bool(false)) => None, + Some(StringOrBool::Bool(true)) => Some(build_rs), + Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), + None => { + match fs::metadata(&build_rs) { + // If there is a `build.rs` file next to the `Cargo.toml`, assume it is + // a build script. + Ok(ref e) if e.is_file() => Some(build_rs), + Ok(_) | Err(_) => None, + } + } + } + } + + pub fn has_profiles(&self) -> bool { + self.profile.is_some() + } +} + +/// Checks a list of build targets, and ensures the target names are unique within a vector. +/// If not, the name of the offending build target is returned. 
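+/// (Uniqueness is keyed on each target's resolved source path, so two
+/// targets built from the same file collide, and the full offending
+/// path is what gets reported.)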
+fn unique_build_targets(targets: &[Target], package_root: &Path) -> Result<(), String> { + let mut seen = HashSet::new(); + for target in targets { + if let TargetSourcePath::Path(path) = target.src_path() { + let full = package_root.join(path); + if !seen.insert(full.clone()) { + return Err(full.display().to_string()); + } + } + } + Ok(()) +} + +impl TomlDependency { + fn to_dependency( + &self, + name: &str, + cx: &mut Context<'_, '_>, + kind: Option, + ) -> CargoResult { + match *self { + TomlDependency::Simple(ref version) => DetailedTomlDependency { + version: Some(version.clone()), + ..Default::default() + } + .to_dependency(name, cx, kind), + TomlDependency::Detailed(ref details) => details.to_dependency(name, cx, kind), + } + } +} + +impl DetailedTomlDependency { + fn to_dependency( + &self, + name_in_toml: &str, + cx: &mut Context<'_, '_>, + kind: Option, + ) -> CargoResult { + if self.version.is_none() && self.path.is_none() && self.git.is_none() { + let msg = format!( + "dependency ({}) specified without \ + providing a local path, Git repository, or \ + version to use. This will be considered an \ + error in future versions", + name_in_toml + ); + cx.warnings.push(msg); + } + + if self.git.is_none() { + let git_only_keys = [ + (&self.branch, "branch"), + (&self.tag, "tag"), + (&self.rev, "rev"), + ]; + + for &(key, key_name) in &git_only_keys { + if key.is_some() { + let msg = format!( + "key `{}` is ignored for dependency ({}). \ + This will be considered an error in future versions", + key_name, name_in_toml + ); + cx.warnings.push(msg) + } + } + } + + let new_source_id = match ( + self.git.as_ref(), + self.path.as_ref(), + self.registry.as_ref(), + self.registry_index.as_ref(), + ) { + (Some(_), _, Some(_), _) | (Some(_), _, _, Some(_)) => failure::bail!( + "dependency ({}) specification is ambiguous. \ + Only one of `git` or `registry` is allowed.", + name_in_toml + ), + (_, _, Some(_), Some(_)) => failure::bail!( + "dependency ({}) specification is ambiguous. \ + Only one of `registry` or `registry-index` is allowed.", + name_in_toml + ), + (Some(git), maybe_path, _, _) => { + if maybe_path.is_some() { + let msg = format!( + "dependency ({}) specification is ambiguous. \ + Only one of `git` or `path` is allowed. \ + This will be considered an error in future versions", + name_in_toml + ); + cx.warnings.push(msg) + } + + let n_details = [&self.branch, &self.tag, &self.rev] + .iter() + .filter(|d| d.is_some()) + .count(); + + if n_details > 1 { + let msg = format!( + "dependency ({}) specification is ambiguous. \ + Only one of `branch`, `tag` or `rev` is allowed. \ + This will be considered an error in future versions", + name_in_toml + ); + cx.warnings.push(msg) + } + + let reference = self + .branch + .clone() + .map(GitReference::Branch) + .or_else(|| self.tag.clone().map(GitReference::Tag)) + .or_else(|| self.rev.clone().map(GitReference::Rev)) + .unwrap_or_else(|| GitReference::Branch("master".to_string())); + let loc = git.to_url()?; + SourceId::for_git(&loc, reference)? + } + (None, Some(path), _, _) => { + cx.nested_paths.push(PathBuf::from(path)); + // If the source ID for the package we're parsing is a path + // source, then we normalize the path here to get rid of + // components like `..`. + // + // The purpose of this is to get a canonical ID for the package + // that we're depending on to ensure that builds of this package + // always end up hashing to the same value no matter where it's + // built from. 
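+            // e.g. `path = "../foo"` alongside `/ws/bar/Cargo.toml`
+            // normalizes to `/ws/foo` rather than `/ws/bar/../foo`.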
+ if cx.source_id.is_path() { + let path = cx.root.join(path); + let path = util::normalize_path(&path); + SourceId::for_path(&path)? + } else { + cx.source_id + } + } + (None, None, Some(registry), None) => SourceId::alt_registry(cx.config, registry)?, + (None, None, None, Some(registry_index)) => { + let url = registry_index.to_url()?; + SourceId::for_registry(&url)? + } + (None, None, None, None) => SourceId::crates_io(cx.config)?, + }; + + let (pkg_name, explicit_name_in_toml) = match self.package { + Some(ref s) => (&s[..], Some(name_in_toml)), + None => (name_in_toml, None), + }; + + let version = self.version.as_ref().map(|v| &v[..]); + let mut dep = match cx.pkgid { + Some(id) => Dependency::parse(pkg_name, version, new_source_id, id, cx.config)?, + None => Dependency::parse_no_deprecated(pkg_name, version, new_source_id)?, + }; + dep.set_features(self.features.iter().flat_map(|x| x)) + .set_default_features( + self.default_features + .or(self.default_features2) + .unwrap_or(true), + ) + .set_optional(self.optional.unwrap_or(false)) + .set_platform(cx.platform.clone()); + if let Some(registry) = &self.registry { + let registry_id = SourceId::alt_registry(cx.config, registry)?; + dep.set_registry_id(registry_id); + } + if let Some(registry_index) = &self.registry_index { + let url = registry_index.to_url()?; + let registry_id = SourceId::for_registry(&url)?; + dep.set_registry_id(registry_id); + } + + if let Some(kind) = kind { + dep.set_kind(kind); + } + if let Some(name_in_toml) = explicit_name_in_toml { + cx.features.require(Feature::rename_dependency())?; + dep.set_explicit_name_in_toml(name_in_toml); + } + Ok(dep) + } +} + +#[derive(Default, Serialize, Deserialize, Debug, Clone)] +struct TomlTarget { + name: Option, + + // The intention was to only accept `crate-type` here but historical + // versions of Cargo also accepted `crate_type`, so look for both. + #[serde(rename = "crate-type")] + crate_type: Option>, + #[serde(rename = "crate_type")] + crate_type2: Option>, + + path: Option, + test: Option, + doctest: Option, + bench: Option, + doc: Option, + plugin: Option, + #[serde(rename = "proc-macro")] + proc_macro: Option, + #[serde(rename = "proc_macro")] + proc_macro2: Option, + harness: Option, + #[serde(rename = "required-features")] + required_features: Option>, + edition: Option, +} + +#[derive(Clone)] +struct PathValue(PathBuf); + +impl<'de> de::Deserialize<'de> for PathValue { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Ok(PathValue(String::deserialize(deserializer)?.into())) + } +} + +impl ser::Serialize for PathValue { + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + self.0.serialize(serializer) + } +} + +/// Corresponds to a `target` entry, but `TomlTarget` is already used. 
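+/// Keys of the `target` table are platform triples or `cfg(..)`
+/// expressions; each value carries that platform's dependency tables.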
+#[derive(Serialize, Deserialize, Debug)] +struct TomlPlatform { + dependencies: Option>, + #[serde(rename = "build-dependencies")] + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + #[serde(rename = "dev-dependencies")] + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, +} + +impl TomlTarget { + fn new() -> TomlTarget { + TomlTarget::default() + } + + fn name(&self) -> String { + match self.name { + Some(ref name) => name.clone(), + None => panic!("target name is required"), + } + } + + fn proc_macro(&self) -> Option { + self.proc_macro.or(self.proc_macro2).or_else(|| { + if let Some(types) = self.crate_types() { + if types.contains(&"proc-macro".to_string()) { + return Some(true); + } + } + None + }) + } + + fn crate_types(&self) -> Option<&Vec> { + self.crate_type + .as_ref() + .or_else(|| self.crate_type2.as_ref()) + } +} + +impl fmt::Debug for PathValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} diff --git a/src/cargo/util/toml/targets.rs b/src/cargo/util/toml/targets.rs new file mode 100644 index 000000000..a834fad5e --- /dev/null +++ b/src/cargo/util/toml/targets.rs @@ -0,0 +1,823 @@ +//! This module implements Cargo conventions for directory layout: +//! +//! * `src/lib.rs` is a library +//! * `src/main.rs` is a binary +//! * `src/bin/*.rs` are binaries +//! * `examples/*.rs` are examples +//! * `tests/*.rs` are integration tests +//! * `benches/*.rs` are benchmarks +//! +//! It is a bit tricky because we need match explicit information from `Cargo.toml` +//! with implicit info in directory layout. + +use std::collections::HashSet; +use std::fs::{self, DirEntry}; +use std::path::{Path, PathBuf}; + +use super::{ + LibKind, PathValue, StringOrBool, StringOrVec, TomlBenchTarget, TomlBinTarget, + TomlExampleTarget, TomlLibTarget, TomlManifest, TomlTarget, TomlTestTarget, +}; +use crate::core::{compiler, Edition, Feature, Features, Target}; +use crate::util::errors::{CargoResult, CargoResultExt}; + +pub fn targets( + features: &Features, + manifest: &TomlManifest, + package_name: &str, + package_root: &Path, + edition: Edition, + custom_build: &Option, + metabuild: &Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let mut targets = Vec::new(); + + let has_lib; + + if let Some(target) = clean_lib( + features, + manifest.lib.as_ref(), + package_root, + package_name, + edition, + warnings, + )? 
{ + targets.push(target); + has_lib = true; + } else { + has_lib = false; + } + + let package = manifest + .package + .as_ref() + .or_else(|| manifest.project.as_ref()) + .ok_or_else(|| failure::format_err!("manifest has no `package` (or `project`)"))?; + + targets.extend(clean_bins( + features, + manifest.bin.as_ref(), + package_root, + package_name, + edition, + package.autobins, + warnings, + errors, + has_lib, + )?); + + targets.extend(clean_examples( + features, + manifest.example.as_ref(), + package_root, + edition, + package.autoexamples, + warnings, + errors, + )?); + + targets.extend(clean_tests( + features, + manifest.test.as_ref(), + package_root, + edition, + package.autotests, + warnings, + errors, + )?); + + targets.extend(clean_benches( + features, + manifest.bench.as_ref(), + package_root, + edition, + package.autobenches, + warnings, + errors, + )?); + + // processing the custom build script + if let Some(custom_build) = manifest.maybe_custom_build(custom_build, package_root) { + if metabuild.is_some() { + failure::bail!("cannot specify both `metabuild` and `build`"); + } + let name = format!( + "build-script-{}", + custom_build + .file_stem() + .and_then(|s| s.to_str()) + .unwrap_or("") + ); + targets.push(Target::custom_build_target( + &name, + package_root.join(custom_build), + edition, + )); + } + if let Some(metabuild) = metabuild { + // Verify names match available build deps. + let bdeps = manifest.build_dependencies.as_ref(); + for name in &metabuild.0 { + if !bdeps.map_or(false, |bd| bd.contains_key(name)) { + failure::bail!( + "metabuild package `{}` must be specified in `build-dependencies`", + name + ); + } + } + + targets.push(Target::metabuild_target(&format!( + "metabuild-{}", + package.name + ))); + } + + Ok(targets) +} + +fn clean_lib( + features: &Features, + toml_lib: Option<&TomlLibTarget>, + package_root: &Path, + package_name: &str, + edition: Edition, + warnings: &mut Vec, +) -> CargoResult> { + let inferred = inferred_lib(package_root); + let lib = match toml_lib { + Some(lib) => { + if let Some(ref name) = lib.name { + // XXX: other code paths dodge this validation + if name.contains('-') { + failure::bail!("library target names cannot contain hyphens: {}", name) + } + } + Some(TomlTarget { + name: lib.name.clone().or_else(|| Some(package_name.to_owned())), + ..lib.clone() + }) + } + None => inferred.as_ref().map(|lib| TomlTarget { + name: Some(package_name.to_string()), + path: Some(PathValue(lib.clone())), + ..TomlTarget::new() + }), + }; + + let lib = match lib { + Some(ref lib) => lib, + None => return Ok(None), + }; + + validate_has_name(lib, "library", "lib")?; + + let path = match (lib.path.as_ref(), inferred) { + (Some(path), _) => package_root.join(&path.0), + (None, Some(path)) => path, + (None, None) => { + let legacy_path = package_root.join("src").join(format!("{}.rs", lib.name())); + if edition == Edition::Edition2015 && legacy_path.exists() { + warnings.push(format!( + "path `{}` was erroneously implicitly accepted for library `{}`,\n\ + please rename the file to `src/lib.rs` or set lib.path in Cargo.toml", + legacy_path.display(), + lib.name() + )); + legacy_path + } else { + failure::bail!( + "can't find library `{}`, \ + rename file to `src/lib.rs` or specify lib.path", + lib.name() + ) + } + } + }; + + // Per the Macros 1.1 RFC: + // + // > Initially if a crate is compiled with the `proc-macro` crate type + // > (and possibly others) it will forbid exporting any items in the + // > crate other than those functions tagged 
#[proc_macro_derive] and + // > those functions must also be placed at the crate root. + // + // A plugin requires exporting plugin_registrar so a crate cannot be + // both at once. + let crate_types = match (lib.crate_types(), lib.plugin, lib.proc_macro()) { + (Some(kinds), _, _) if kinds.contains(&"proc-macro".to_string()) => { + if let Some(true) = lib.plugin { + // This is a warning to retain backwards compatibility. + warnings.push(format!( + "proc-macro library `{}` should not specify `plugin = true`", + lib.name() + )); + } + warnings.push(format!( + "library `{}` should only specify `proc-macro = true` instead of setting `crate-type`", + lib.name() + )); + if kinds.len() > 1 { + failure::bail!("cannot mix `proc-macro` crate type with others"); + } + vec![LibKind::ProcMacro] + } + (_, Some(true), Some(true)) => { + failure::bail!("`lib.plugin` and `lib.proc-macro` cannot both be `true`") + } + (Some(kinds), _, _) => kinds.iter().map(|s| s.into()).collect(), + (None, Some(true), _) => vec![LibKind::Dylib], + (None, _, Some(true)) => vec![LibKind::ProcMacro], + (None, _, _) => vec![LibKind::Lib], + }; + + let mut target = Target::lib_target(&lib.name(), crate_types, path, edition); + configure(features, lib, &mut target)?; + Ok(Some(target)) +} + +fn clean_bins( + features: &Features, + toml_bins: Option<&Vec>, + package_root: &Path, + package_name: &str, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, + has_lib: bool, +) -> CargoResult> { + let inferred = inferred_bins(package_root, package_name); + + let bins = toml_targets_and_inferred( + toml_bins, + &inferred, + package_root, + autodiscover, + edition, + warnings, + "binary", + "bin", + "autobins", + ); + + for bin in &bins { + validate_has_name(bin, "binary", "bin")?; + + let name = bin.name(); + + if let Some(crate_types) = bin.crate_types() { + if !crate_types.is_empty() { + errors.push(format!( + "the target `{}` is a binary and can't have any \ + crate-types set (currently \"{}\")", + name, + crate_types.join(", ") + )); + } + } + + if bin.proc_macro() == Some(true) { + errors.push(format!( + "the target `{}` is a binary and can't have `proc-macro` \ + set `true`", + name + )); + } + + if compiler::is_bad_artifact_name(&name) { + failure::bail!("the binary target name `{}` is forbidden", name) + } + } + + validate_unique_names(&bins, "binary")?; + + let mut result = Vec::new(); + for bin in &bins { + let path = target_path(bin, &inferred, "bin", package_root, edition, &mut |_| { + if let Some(legacy_path) = legacy_bin_path(package_root, &bin.name(), has_lib) { + warnings.push(format!( + "path `{}` was erroneously implicitly accepted for binary `{}`,\n\ + please set bin.path in Cargo.toml", + legacy_path.display(), + bin.name() + )); + Some(legacy_path) + } else { + None + } + }); + let path = match path { + Ok(path) => path, + Err(e) => failure::bail!("{}", e), + }; + + let mut target = + Target::bin_target(&bin.name(), path, bin.required_features.clone(), edition); + configure(features, bin, &mut target)?; + result.push(target); + } + return Ok(result); + + fn legacy_bin_path(package_root: &Path, name: &str, has_lib: bool) -> Option { + if !has_lib { + let path = package_root.join("src").join(format!("{}.rs", name)); + if path.exists() { + return Some(path); + } + } + let path = package_root.join("src").join("main.rs"); + if path.exists() { + return Some(path); + } + + let path = package_root.join("src").join("bin").join("main.rs"); + if path.exists() { + return Some(path); + } + None + 
} +} + +fn clean_examples( + features: &Features, + toml_examples: Option<&Vec>, + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let inferred = infer_from_directory(&package_root.join("examples")); + + let targets = clean_targets( + "example", + "example", + toml_examples, + &inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + "autoexamples", + )?; + + let mut result = Vec::new(); + for (path, toml) in targets { + let crate_types = match toml.crate_types() { + Some(kinds) => kinds.iter().map(|s| s.into()).collect(), + None => Vec::new(), + }; + + let mut target = Target::example_target( + &toml.name(), + crate_types, + path, + toml.required_features.clone(), + edition, + ); + configure(features, &toml, &mut target)?; + result.push(target); + } + + Ok(result) +} + +fn clean_tests( + features: &Features, + toml_tests: Option<&Vec>, + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let inferred = infer_from_directory(&package_root.join("tests")); + + let targets = clean_targets( + "test", + "test", + toml_tests, + &inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + "autotests", + )?; + + let mut result = Vec::new(); + for (path, toml) in targets { + let mut target = + Target::test_target(&toml.name(), path, toml.required_features.clone(), edition); + configure(features, &toml, &mut target)?; + result.push(target); + } + Ok(result) +} + +fn clean_benches( + features: &Features, + toml_benches: Option<&Vec>, + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, +) -> CargoResult> { + let mut legacy_warnings = vec![]; + + let targets = { + let mut legacy_bench_path = |bench: &TomlTarget| { + let legacy_path = package_root.join("src").join("bench.rs"); + if !(bench.name() == "bench" && legacy_path.exists()) { + return None; + } + legacy_warnings.push(format!( + "path `{}` was erroneously implicitly accepted for benchmark `{}`,\n\ + please set bench.path in Cargo.toml", + legacy_path.display(), + bench.name() + )); + Some(legacy_path) + }; + + let inferred = infer_from_directory(&package_root.join("benches")); + + clean_targets_with_legacy_path( + "benchmark", + "bench", + toml_benches, + &inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + &mut legacy_bench_path, + "autobenches", + )? 
+ }; + + warnings.append(&mut legacy_warnings); + + let mut result = Vec::new(); + for (path, toml) in targets { + let mut target = + Target::bench_target(&toml.name(), path, toml.required_features.clone(), edition); + configure(features, &toml, &mut target)?; + result.push(target); + } + + Ok(result) +} + +fn clean_targets( + target_kind_human: &str, + target_kind: &str, + toml_targets: Option<&Vec>, + inferred: &[(String, PathBuf)], + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, + autodiscover_flag_name: &str, +) -> CargoResult> { + clean_targets_with_legacy_path( + target_kind_human, + target_kind, + toml_targets, + inferred, + package_root, + edition, + autodiscover, + warnings, + errors, + &mut |_| None, + autodiscover_flag_name, + ) +} + +fn clean_targets_with_legacy_path( + target_kind_human: &str, + target_kind: &str, + toml_targets: Option<&Vec>, + inferred: &[(String, PathBuf)], + package_root: &Path, + edition: Edition, + autodiscover: Option, + warnings: &mut Vec, + errors: &mut Vec, + legacy_path: &mut dyn FnMut(&TomlTarget) -> Option, + autodiscover_flag_name: &str, +) -> CargoResult> { + let toml_targets = toml_targets_and_inferred( + toml_targets, + inferred, + package_root, + autodiscover, + edition, + warnings, + target_kind_human, + target_kind, + autodiscover_flag_name, + ); + + for target in &toml_targets { + validate_has_name(target, target_kind_human, target_kind)?; + } + + validate_unique_names(&toml_targets, target_kind)?; + let mut result = Vec::new(); + for target in toml_targets { + let path = target_path( + &target, + inferred, + target_kind, + package_root, + edition, + legacy_path, + ); + let path = match path { + Ok(path) => path, + Err(e) => { + errors.push(e); + continue; + } + }; + result.push((path, target)); + } + Ok(result) +} + +fn inferred_lib(package_root: &Path) -> Option { + let lib = package_root.join("src").join("lib.rs"); + if fs::metadata(&lib).is_ok() { + Some(lib) + } else { + None + } +} + +fn inferred_bins(package_root: &Path, package_name: &str) -> Vec<(String, PathBuf)> { + let main = package_root.join("src").join("main.rs"); + let mut result = Vec::new(); + if main.exists() { + result.push((package_name.to_string(), main)); + } + result.extend(infer_from_directory(&package_root.join("src").join("bin"))); + + result +} + +fn infer_from_directory(directory: &Path) -> Vec<(String, PathBuf)> { + let entries = match fs::read_dir(directory) { + Err(_) => return Vec::new(), + Ok(dir) => dir, + }; + + entries + .filter_map(|e| e.ok()) + .filter(is_not_dotfile) + .filter_map(|d| infer_any(&d)) + .collect() +} + +fn infer_any(entry: &DirEntry) -> Option<(String, PathBuf)> { + if entry.path().extension().and_then(|p| p.to_str()) == Some("rs") { + infer_file(entry) + } else if entry.file_type().map(|t| t.is_dir()).ok() == Some(true) { + infer_subdirectory(entry) + } else { + None + } +} + +fn infer_file(entry: &DirEntry) -> Option<(String, PathBuf)> { + let path = entry.path(); + path.file_stem() + .and_then(|p| p.to_str()) + .map(|p| (p.to_owned(), path.clone())) +} + +fn infer_subdirectory(entry: &DirEntry) -> Option<(String, PathBuf)> { + let path = entry.path(); + let main = path.join("main.rs"); + let name = path.file_name().and_then(|n| n.to_str()); + match (name, main.exists()) { + (Some(name), true) => Some((name.to_owned(), main)), + _ => None, + } +} + +fn is_not_dotfile(entry: &DirEntry) -> bool { + entry.file_name().to_str().map(|s| s.starts_with('.')) == Some(false) +} + +fn 
toml_targets_and_inferred( + toml_targets: Option<&Vec>, + inferred: &[(String, PathBuf)], + package_root: &Path, + autodiscover: Option, + edition: Edition, + warnings: &mut Vec, + target_kind_human: &str, + target_kind: &str, + autodiscover_flag_name: &str, +) -> Vec { + let inferred_targets = inferred_to_toml_targets(inferred); + match toml_targets { + None => { + if let Some(false) = autodiscover { + vec![] + } else { + inferred_targets + } + } + Some(targets) => { + let mut targets = targets.clone(); + + let target_path = + |target: &TomlTarget| target.path.clone().map(|p| package_root.join(p.0)); + + let mut seen_names = HashSet::new(); + let mut seen_paths = HashSet::new(); + for target in targets.iter() { + seen_names.insert(target.name.clone()); + seen_paths.insert(target_path(target)); + } + + let mut rem_targets = vec![]; + for target in inferred_targets { + if !seen_names.contains(&target.name) && !seen_paths.contains(&target_path(&target)) + { + rem_targets.push(target); + } + } + + let autodiscover = match autodiscover { + Some(autodiscover) => autodiscover, + None => { + if edition == Edition::Edition2015 { + if !rem_targets.is_empty() { + let mut rem_targets_str = String::new(); + for t in rem_targets.iter() { + if let Some(p) = t.path.clone() { + rem_targets_str.push_str(&format!("* {}\n", p.0.display())) + } + } + warnings.push(format!( + "\ +An explicit [[{section}]] section is specified in Cargo.toml which currently +disables Cargo from automatically inferring other {target_kind_human} targets. +This inference behavior will change in the Rust 2018 edition and the following +files will be included as a {target_kind_human} target: + +{rem_targets_str} +This is likely to break cargo build or cargo test as these files may not be +ready to be compiled as a {target_kind_human} target today. You can future-proof yourself +and disable this warning by adding `{autodiscover_flag_name} = false` to your [package] +section. You may also move the files to a location where Cargo would not +automatically infer them to be a target, such as in subfolders. + +For more information on this warning you can consult +https://github.com/rust-lang/cargo/issues/5330", + section = target_kind, + target_kind_human = target_kind_human, + rem_targets_str = rem_targets_str, + autodiscover_flag_name = autodiscover_flag_name, + )); + }; + false + } else { + true + } + } + }; + + if autodiscover { + targets.append(&mut rem_targets); + } + + targets + } + } +} + +fn inferred_to_toml_targets(inferred: &[(String, PathBuf)]) -> Vec { + inferred + .iter() + .map(|&(ref name, ref path)| TomlTarget { + name: Some(name.clone()), + path: Some(PathValue(path.clone())), + ..TomlTarget::new() + }) + .collect() +} + +fn validate_has_name( + target: &TomlTarget, + target_kind_human: &str, + target_kind: &str, +) -> CargoResult<()> { + match target.name { + Some(ref name) => { + if name.trim().is_empty() { + failure::bail!("{} target names cannot be empty", target_kind_human) + } + } + None => failure::bail!( + "{} target {}.name is required", + target_kind_human, + target_kind + ), + } + + Ok(()) +} + +/// Will check a list of toml targets, and make sure the target names are unique within a vector. 
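+/// For example, two `[[bin]]` sections that are both named `cli` are rejected
+/// here with an error rather than silently shadowing one another.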
+fn validate_unique_names(targets: &[TomlTarget], target_kind: &str) -> CargoResult<()> { + let mut seen = HashSet::new(); + for name in targets.iter().map(|e| e.name()) { + if !seen.insert(name.clone()) { + failure::bail!( + "found duplicate {target_kind} name {name}, \ + but all {target_kind} targets must have a unique name", + target_kind = target_kind, + name = name + ); + } + } + Ok(()) +} + +fn configure(features: &Features, toml: &TomlTarget, target: &mut Target) -> CargoResult<()> { + let t2 = target.clone(); + target + .set_tested(toml.test.unwrap_or_else(|| t2.tested())) + .set_doc(toml.doc.unwrap_or_else(|| t2.documented())) + .set_doctest(toml.doctest.unwrap_or_else(|| t2.doctested())) + .set_benched(toml.bench.unwrap_or_else(|| t2.benched())) + .set_harness(toml.harness.unwrap_or_else(|| t2.harness())) + .set_proc_macro(toml.proc_macro.unwrap_or_else(|| t2.proc_macro())) + .set_for_host(match (toml.plugin, toml.proc_macro()) { + (None, None) => t2.for_host(), + (Some(true), _) | (_, Some(true)) => true, + (Some(false), _) | (_, Some(false)) => false, + }); + if let Some(edition) = toml.edition.clone() { + features + .require(Feature::edition()) + .chain_err(|| "editions are unstable")?; + target.set_edition( + edition + .parse() + .chain_err(|| "failed to parse the `edition` key")?, + ); + } + Ok(()) +} + +fn target_path( + target: &TomlTarget, + inferred: &[(String, PathBuf)], + target_kind: &str, + package_root: &Path, + edition: Edition, + legacy_path: &mut dyn FnMut(&TomlTarget) -> Option, +) -> Result { + if let Some(ref path) = target.path { + // Should we verify that this path exists here? + return Ok(package_root.join(&path.0)); + } + let name = target.name(); + + let mut matching = inferred + .iter() + .filter(|&&(ref n, _)| n == &name) + .map(|&(_, ref p)| p.clone()); + + let first = matching.next(); + let second = matching.next(); + match (first, second) { + (Some(path), None) => Ok(path), + (None, None) | (Some(_), Some(_)) => { + if edition == Edition::Edition2015 { + if let Some(path) = legacy_path(target) { + return Ok(path); + } + } + Err(format!( + "can't find `{name}` {target_kind}, specify {target_kind}.path", + name = name, + target_kind = target_kind + )) + } + (None, Some(_)) => unreachable!(), + } +} diff --git a/src/cargo/util/vcs.rs b/src/cargo/util/vcs.rs new file mode 100644 index 000000000..844748676 --- /dev/null +++ b/src/cargo/util/vcs.rs @@ -0,0 +1,102 @@ +use std::fs::create_dir; +use std::path::Path; + +use git2; + +use crate::util::{process, CargoResult}; + +// Check if we are in an existing repo. We define that to be true if either: +// +// 1. We are in a git repo and the path to the new package is not an ignored +// path in that repo. +// 2. We are in an HG repo. 
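+//
+// For example, `cargo new foo` inside an existing git work tree treats the
+// new project as already under version control (rule 1), so no `git init`
+// runs, unless `foo` is an ignored path in that repository.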
+pub fn existing_vcs_repo(path: &Path, cwd: &Path) -> bool {
+    fn in_git_repo(path: &Path, cwd: &Path) -> bool {
+        if let Ok(repo) = GitRepo::discover(path, cwd) {
+            repo.is_path_ignored(path)
+                .map(|ignored| !ignored)
+                .unwrap_or(true)
+        } else {
+            false
+        }
+    }
+
+    in_git_repo(path, cwd) || HgRepo::discover(path, cwd).is_ok()
+}
+
+pub struct HgRepo;
+pub struct GitRepo;
+pub struct PijulRepo;
+pub struct FossilRepo;
+
+impl GitRepo {
+    pub fn init(path: &Path, _: &Path) -> CargoResult<GitRepo> {
+        git2::Repository::init(path)?;
+        Ok(GitRepo)
+    }
+    pub fn discover(path: &Path, _: &Path) -> Result<git2::Repository, git2::Error> {
+        git2::Repository::discover(path)
+    }
+}
+
+impl HgRepo {
+    pub fn init(path: &Path, cwd: &Path) -> CargoResult<HgRepo> {
+        process("hg").cwd(cwd).arg("init").arg(path).exec()?;
+        Ok(HgRepo)
+    }
+    pub fn discover(path: &Path, cwd: &Path) -> CargoResult<HgRepo> {
+        process("hg")
+            .cwd(cwd)
+            .arg("--cwd")
+            .arg(path)
+            .arg("root")
+            .exec_with_output()?;
+        Ok(HgRepo)
+    }
+}
+
+impl PijulRepo {
+    pub fn init(path: &Path, cwd: &Path) -> CargoResult<PijulRepo> {
+        process("pijul").cwd(cwd).arg("init").arg(path).exec()?;
+        Ok(PijulRepo)
+    }
+}
+
+impl FossilRepo {
+    pub fn init(path: &Path, cwd: &Path) -> CargoResult<FossilRepo> {
+        // fossil doesn't create the directory so we'll do that first
+        create_dir(path)?;
+
+        // set up the paths we'll use
+        let db_fname = ".fossil";
+        let mut db_path = path.to_owned();
+        db_path.push(db_fname);
+
+        // then create the fossil DB in that location
+        process("fossil")
+            .cwd(cwd)
+            .arg("init")
+            .arg(&db_path)
+            .exec()?;
+
+        // open it in that new directory
+        process("fossil")
+            .cwd(&path)
+            .arg("open")
+            .arg(db_fname)
+            .exec()?;
+
+        // set `target` as ignorable and cleanable
+        process("fossil")
+            .cwd(cwd)
+            .arg("settings")
+            .arg("ignore-glob")
+            .arg("target")
+            .exec()?;
+        process("fossil")
+            .cwd(cwd)
+            .arg("settings")
+            .arg("clean-glob")
+            .arg("target")
+            .exec()?;
+        Ok(FossilRepo)
+    }
+}
diff --git a/src/cargo/util/workspace.rs b/src/cargo/util/workspace.rs
new file mode 100644
index 000000000..46683c457
--- /dev/null
+++ b/src/cargo/util/workspace.rs
@@ -0,0 +1,75 @@
+use crate::core::{Target, Workspace};
+use crate::ops::CompileOptions;
+use crate::util::CargoResult;
+
+use std::fmt::Write;
+
+fn get_available_targets<'a>(
+    filter_fn: fn(&Target) -> bool,
+    ws: &'a Workspace<'_>,
+    options: &'a CompileOptions<'_>,
+) -> CargoResult<Vec<&'a Target>> {
+    let packages = options.spec.get_packages(ws)?;
+
+    let mut targets: Vec<_> = packages
+        .into_iter()
+        .flat_map(|pkg| {
+            pkg.manifest()
+                .targets()
+                .iter()
+                .filter(|target| filter_fn(target))
+        })
+        .collect();
+
+    targets.sort();
+
+    Ok(targets)
+}
+
+fn print_available(
+    filter_fn: fn(&Target) -> bool,
+    ws: &Workspace<'_>,
+    options: &CompileOptions<'_>,
+    option_name: &str,
+    plural_name: &str,
+) -> CargoResult<()> {
+    let targets = get_available_targets(filter_fn, ws, options)?;
+
+    let mut output = String::new();
+    writeln!(output, "\"{}\" takes one argument.", option_name)?;
+
+    if targets.is_empty() {
+        writeln!(output, "No {} available.", plural_name)?;
+    } else {
+        writeln!(output, "Available {}:", plural_name)?;
+        for target in targets {
+            writeln!(output, "    {}", target.name())?;
+        }
+    }
+    Err(failure::err_msg(output))?
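+    // The `?` above converts the message into the function's error type and
+    // returns it, so this helper always exits via `Err`.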
+}
+
+pub fn print_available_examples(
+    ws: &Workspace<'_>,
+    options: &CompileOptions<'_>,
+) -> CargoResult<()> {
+    print_available(Target::is_example, ws, options, "--example", "examples")
+}
+
+pub fn print_available_binaries(
+    ws: &Workspace<'_>,
+    options: &CompileOptions<'_>,
+) -> CargoResult<()> {
+    print_available(Target::is_bin, ws, options, "--bin", "binaries")
+}
+
+pub fn print_available_benches(
+    ws: &Workspace<'_>,
+    options: &CompileOptions<'_>,
+) -> CargoResult<()> {
+    print_available(Target::is_bench, ws, options, "--bench", "benches")
+}
+
+pub fn print_available_tests(ws: &Workspace<'_>, options: &CompileOptions<'_>) -> CargoResult<()> {
+    print_available(Target::is_test, ws, options, "--test", "tests")
+}
diff --git a/src/crates-io/Cargo.toml b/src/crates-io/Cargo.toml
new file mode 100644
index 000000000..042333347
--- /dev/null
+++ b/src/crates-io/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "crates-io"
+version = "0.23.0"
+edition = "2018"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/cargo"
+description = """
+Helpers for interacting with crates.io
+"""
+
+[lib]
+name = "crates_io"
+path = "lib.rs"
+
+[dependencies]
+curl = "0.4"
+failure = "0.1.1"
+http = "0.1"
+serde = { version = "1.0", features = ['derive'] }
+serde_derive = "1.0"
+serde_json = "1.0"
+url = "1.0"
diff --git a/src/crates-io/LICENSE-APACHE b/src/crates-io/LICENSE-APACHE
new file mode 120000
index 000000000..1cd601d0a
--- /dev/null
+++ b/src/crates-io/LICENSE-APACHE
@@ -0,0 +1 @@
+../../LICENSE-APACHE
\ No newline at end of file
diff --git a/src/crates-io/LICENSE-MIT b/src/crates-io/LICENSE-MIT
new file mode 120000
index 000000000..b2cfbdc7b
--- /dev/null
+++ b/src/crates-io/LICENSE-MIT
@@ -0,0 +1 @@
+../../LICENSE-MIT
\ No newline at end of file
diff --git a/src/crates-io/lib.rs b/src/crates-io/lib.rs
new file mode 100644
index 000000000..1b4d860d6
--- /dev/null
+++ b/src/crates-io/lib.rs
@@ -0,0 +1,354 @@
+#![allow(unknown_lints)]
+#![allow(clippy::identity_op)] // used for vertical alignment
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::Cursor;
+
+use curl::easy::{Easy, List};
+use failure::bail;
+use http::status::StatusCode;
+use serde::{Deserialize, Serialize};
+use serde_json;
+use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
+
+pub type Result<T> = std::result::Result<T, failure::Error>;
+
+pub struct Registry {
+    /// The base URL for issuing API requests.
+    host: String,
+    /// Optional authorization token.
+    /// If None, commands requiring authorization will fail.
+    token: Option<String>,
+    /// Curl handle for issuing requests.
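+    /// The same handle is reused across requests, which lets curl keep the
+    /// underlying connection alive when the server allows it.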
+    handle: Easy,
+}
+
+#[derive(PartialEq, Clone, Copy)]
+pub enum Auth {
+    Authorized,
+    Unauthorized,
+}
+
+#[derive(Deserialize)]
+pub struct Crate {
+    pub name: String,
+    pub description: Option<String>,
+    pub max_version: String,
+}
+
+#[derive(Serialize)]
+pub struct NewCrate {
+    pub name: String,
+    pub vers: String,
+    pub deps: Vec<NewCrateDependency>,
+    pub features: BTreeMap<String, Vec<String>>,
+    pub authors: Vec<String>,
+    pub description: Option<String>,
+    pub documentation: Option<String>,
+    pub homepage: Option<String>,
+    pub readme: Option<String>,
+    pub readme_file: Option<String>,
+    pub keywords: Vec<String>,
+    pub categories: Vec<String>,
+    pub license: Option<String>,
+    pub license_file: Option<String>,
+    pub repository: Option<String>,
+    pub badges: BTreeMap<String, BTreeMap<String, String>>,
+    #[serde(default)]
+    pub links: Option<String>,
+}
+
+#[derive(Serialize)]
+pub struct NewCrateDependency {
+    pub optional: bool,
+    pub default_features: bool,
+    pub name: String,
+    pub features: Vec<String>,
+    pub version_req: String,
+    pub target: Option<String>,
+    pub kind: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub registry: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub explicit_name_in_toml: Option<String>,
+}
+
+#[derive(Deserialize)]
+pub struct User {
+    pub id: u32,
+    pub login: String,
+    pub avatar: Option<String>,
+    pub email: Option<String>,
+    pub name: Option<String>,
+}
+
+pub struct Warnings {
+    pub invalid_categories: Vec<String>,
+    pub invalid_badges: Vec<String>,
+    pub other: Vec<String>,
+}
+
+#[derive(Deserialize)]
+struct R {
+    ok: bool,
+}
+#[derive(Deserialize)]
+struct OwnerResponse {
+    ok: bool,
+    msg: String,
+}
+#[derive(Deserialize)]
+struct ApiErrorList {
+    errors: Vec<ApiError>,
+}
+#[derive(Deserialize)]
+struct ApiError {
+    detail: String,
+}
+#[derive(Serialize)]
+struct OwnersReq<'a> {
+    users: &'a [&'a str],
+}
+#[derive(Deserialize)]
+struct Users {
+    users: Vec<User>,
+}
+#[derive(Deserialize)]
+struct TotalCrates {
+    total: u32,
+}
+#[derive(Deserialize)]
+struct Crates {
+    crates: Vec<Crate>,
+    meta: TotalCrates,
+}
+impl Registry {
+    pub fn new(host: String, token: Option<String>) -> Registry {
+        Registry::new_handle(host, token, Easy::new())
+    }
+
+    pub fn new_handle(host: String, token: Option<String>, handle: Easy) -> Registry {
+        Registry {
+            host,
+            token,
+            handle,
+        }
+    }
+
+    pub fn host(&self) -> &str {
+        &self.host
+    }
+
+    pub fn add_owners(&mut self, krate: &str, owners: &[&str]) -> Result<String> {
+        let body = serde_json::to_string(&OwnersReq { users: owners })?;
+        let body = self.put(&format!("/crates/{}/owners", krate), body.as_bytes())?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(serde_json::from_str::<OwnerResponse>(&body)?.msg)
+    }
+
+    pub fn remove_owners(&mut self, krate: &str, owners: &[&str]) -> Result<()> {
+        let body = serde_json::to_string(&OwnersReq { users: owners })?;
+        let body = self.delete(&format!("/crates/{}/owners", krate), Some(body.as_bytes()))?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(())
+    }
+
+    pub fn list_owners(&mut self, krate: &str) -> Result<Vec<User>> {
+        let body = self.get(&format!("/crates/{}/owners", krate))?;
+        Ok(serde_json::from_str::<Users>(&body)?.users)
+    }
+
+    pub fn publish(&mut self, krate: &NewCrate, tarball: &File) -> Result<Warnings> {
+        let json = serde_json::to_string(krate)?;
+        // Prepare the body. The format of the upload request is:
+        //
+        //      <le u32 of json>
+        //      <json request> (metadata for the package)
+        //      <le u32 of tarball>
+        //      <source tarball>
+        let stat = tarball.metadata()?;
+        let header = {
+            let mut w = Vec::new();
+            w.extend(
+                [
+                    (json.len() >> 0) as u8,
+                    (json.len() >> 8) as u8,
+                    (json.len() >> 16) as u8,
+                    (json.len() >> 24) as u8,
+                ].iter().cloned(),
+            );
+            w.extend(json.as_bytes().iter().cloned());
+            w.extend(
+                [
+                    (stat.len() >> 0) as u8,
+                    (stat.len() >> 8) as u8,
+                    (stat.len() >> 16) as u8,
+                    (stat.len() >> 24) as u8,
+                ].iter().cloned(),
+            );
+            w
+        };
+        let size = stat.len() as usize + header.len();
+        let mut body = Cursor::new(header).chain(tarball);
+
+        let url = format!("{}/api/v1/crates/new", self.host);
+
+        let token = match self.token.as_ref() {
+            Some(s) => s,
+            None => bail!("no upload token found, please run `cargo login`"),
+        };
+        self.handle.put(true)?;
+        self.handle.url(&url)?;
+        self.handle.in_filesize(size as u64)?;
+        let mut headers = List::new();
+        headers.append("Accept: application/json")?;
+        headers.append(&format!("Authorization: {}", token))?;
+        self.handle.http_headers(headers)?;
+
+        let body = handle(&mut self.handle, &mut |buf| body.read(buf).unwrap_or(0))?;
+
+        let response = if body.is_empty() {
+            "{}".parse()?
+        } else {
+            body.parse::<serde_json::Value>()?
+        };
+
+        let invalid_categories: Vec<String> = response
+            .get("warnings")
+            .and_then(|j| j.get("invalid_categories"))
+            .and_then(|j| j.as_array())
+            .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect())
+            .unwrap_or_else(Vec::new);
+
+        let invalid_badges: Vec<String> = response
+            .get("warnings")
+            .and_then(|j| j.get("invalid_badges"))
+            .and_then(|j| j.as_array())
+            .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect())
+            .unwrap_or_else(Vec::new);
+
+        let other: Vec<String> = response
+            .get("warnings")
+            .and_then(|j| j.get("other"))
+            .and_then(|j| j.as_array())
+            .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect())
+            .unwrap_or_else(Vec::new);
+
+        Ok(Warnings {
+            invalid_categories,
+            invalid_badges,
+            other,
+        })
+    }
+
+    pub fn search(&mut self, query: &str, limit: u32) -> Result<(Vec<Crate>, u32)> {
+        let formatted_query = percent_encode(query.as_bytes(), QUERY_ENCODE_SET);
+        let body = self.req(
+            &format!("/crates?q={}&per_page={}", formatted_query, limit),
+            None,
+            Auth::Unauthorized,
+        )?;
+
+        let crates = serde_json::from_str::<Crates>(&body)?;
+        Ok((crates.crates, crates.meta.total))
+    }
+
+    pub fn yank(&mut self, krate: &str, version: &str) -> Result<()> {
+        let body = self.delete(&format!("/crates/{}/{}/yank", krate, version), None)?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(())
+    }
+
+    pub fn unyank(&mut self, krate: &str, version: &str) -> Result<()> {
+        let body = self.put(&format!("/crates/{}/{}/unyank", krate, version), &[])?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(())
+    }
+
+    fn put(&mut self, path: &str, b: &[u8]) -> Result<String> {
+        self.handle.put(true)?;
+        self.req(path, Some(b), Auth::Authorized)
+    }
+
+    fn get(&mut self, path: &str) -> Result<String> {
+        self.handle.get(true)?;
+        self.req(path, None, Auth::Authorized)
+    }
+
+    fn delete(&mut self, path: &str, b: Option<&[u8]>) -> Result<String> {
+        self.handle.custom_request("DELETE")?;
+        self.req(path, b, Auth::Authorized)
+    }
+
+    fn req(&mut self, path: &str, body: Option<&[u8]>, authorized: Auth) -> Result<String> {
+        self.handle.url(&format!("{}/api/v1{}", self.host, path))?;
+        let mut headers = List::new();
+        headers.append("Accept: application/json")?;
+        headers.append("Content-Type: application/json")?;
+
+        if authorized == Auth::Authorized {
+            let token = match self.token.as_ref() {
+                Some(s) => s,
+                None => bail!("no upload token found, please run `cargo login`"),
+            };
+            headers.append(&format!("Authorization: {}", token))?;
+        }
+        self.handle.http_headers(headers)?;
+        match body {
+            Some(mut body) => {
+                self.handle.upload(true)?;
+                self.handle.in_filesize(body.len() as u64)?;
+                handle(&mut self.handle, &mut |buf| body.read(buf).unwrap_or(0))
+            }
+            None => handle(&mut self.handle, &mut |_| 0),
+        }
+    }
+}
+
+fn handle(handle: &mut Easy, read: &mut dyn FnMut(&mut [u8]) -> usize) -> Result<String> {
+    let mut headers = Vec::new();
+    let mut body = Vec::new();
+    {
+        let mut handle = handle.transfer();
+        handle.read_function(|buf| Ok(read(buf)))?;
+        handle.write_function(|data| {
+            body.extend_from_slice(data);
+            Ok(data.len())
+        })?;
+        handle.header_function(|data| {
+            headers.push(String::from_utf8_lossy(data).into_owned());
+            true
+        })?;
+        handle.perform()?;
+    }
+
+    let body = match String::from_utf8(body) {
+        Ok(body) => body,
+        Err(..) => bail!("response body was not valid utf-8"),
+    };
+    let errors = serde_json::from_str::<ApiErrorList>(&body).ok().map(|s| {
+        s.errors.into_iter().map(|s| s.detail).collect::<Vec<_>>()
+    });
+
+    match (handle.response_code()?, errors) {
+        (0, None) | (200, None) => {},
+        (code, Some(errors)) => {
+            let code = StatusCode::from_u16(code as _)?;
+            bail!("api errors (status {}): {}", code, errors.join(", "))
+        }
+        (code, None) => bail!(
+            "failed to get a 200 OK response, got {}\n\
+             headers:\n\
+             \t{}\n\
+             body:\n\
+             {}",
+            code,
+            headers.join("\n\t"),
+            body,
+        ),
+    }
+
+    Ok(body)
+}
diff --git a/src/doc/Makefile b/src/doc/Makefile
new file mode 100644
index 000000000..505532d2b
--- /dev/null
+++ b/src/doc/Makefile
@@ -0,0 +1,28 @@
+# This Makefile is used to build the Cargo man pages.
+#
+# The source for the man pages are located in src/doc/man in Asciidoctor
+# format. See https://asciidoctor.org/ for more information.
+#
+# Just run `make` and it will generate the man pages in src/etc/man and the
+# HTML pages in src/doc/man/generated.
+#
+# There are some Asciidoctor extensions, see the file `asciidoc-extensions.rb`
+# for the documentation.
+
+MAN_SOURCE = $(sort $(wildcard man/cargo*.adoc))
+COMMANDS = $(notdir $(MAN_SOURCE))
+HTML = $(patsubst %.adoc,man/generated/%.html,$(COMMANDS))
+MAN_LOCATION = ../etc/man
+MAN = $(patsubst %.adoc,$(MAN_LOCATION)/%.1,$(COMMANDS))
+ASCIIDOCOPTS = -r ./asciidoc-extension.rb
+OTHER_DEPS = asciidoc-extension.rb $(filter-out $(MAN_SOURCE),$(sort $(wildcard man/*.adoc)))
+
+all: commands-html man
+commands-html: $(HTML)
+man: $(MAN)
+
+$(HTML): man/generated/%.html : man/%.adoc asciidoc-extension.rb $(OTHER_DEPS)
+	asciidoctor $(ASCIIDOCOPTS) -s $< -o $@
+
+$(MAN): $(MAN_LOCATION)/%.1 : man/%.adoc $(OTHER_DEPS)
+	asciidoctor $(ASCIIDOCOPTS) -b manpage $< -o $@
diff --git a/src/doc/README.md b/src/doc/README.md
new file mode 100644
index 000000000..983c96693
--- /dev/null
+++ b/src/doc/README.md
@@ -0,0 +1,47 @@
+# The Cargo Book
+
+
+### Requirements
+
+Building the book requires [mdBook]. To get it:
+
+[mdBook]: https://github.com/azerupi/mdBook
+
+```console
+$ cargo install mdbook
+```
+
+### Building
+
+To build the book:
+
+```console
+$ mdbook build
+```
+
+The output will be in the `book` subdirectory. To check it out, open it in
+your web browser.
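+
+If your installed mdbook provides the `serve` subcommand, it can also build
+the book and serve it locally (by default at http://localhost:3000):
+
+```console
+$ mdbook serve
+```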
+ +_Firefox:_ +```console +$ firefox book/index.html # Linux +$ open -a "Firefox" book/index.html # OS X +$ Start-Process "firefox.exe" .\book\index.html # Windows (PowerShell) +$ start firefox.exe .\book\index.html # Windows (Cmd) +``` + +_Chrome:_ +```console +$ google-chrome book/index.html # Linux +$ open -a "Google Chrome" book/index.html # OS X +$ Start-Process "chrome.exe" .\book\index.html # Windows (PowerShell) +$ start chrome.exe .\book\index.html # Windows (Cmd) +``` + + +## Contributing + +Given that the book is still in a draft state, we'd love your help! Please feel free to open +issues about anything, and send in PRs for things you'd like to fix or change. If your change is +large, please open an issue first, so we can make sure that it's something we'd accept before you +go through the work of getting a PR together. diff --git a/src/doc/asciidoc-extension.rb b/src/doc/asciidoc-extension.rb new file mode 100644 index 000000000..6e662639c --- /dev/null +++ b/src/doc/asciidoc-extension.rb @@ -0,0 +1,109 @@ +require 'asciidoctor/extensions' unless RUBY_ENGINE == 'opal' + +include Asciidoctor + +# An inline macro that generates links to related man pages. +# +# Usage +# +# man:gittutorial[7] +# +class ManInlineMacro < Extensions::InlineMacroProcessor + use_dsl + + named :man + name_positional_attributes 'volnum' + + def process parent, target, attrs + manname = target + suffix = if (volnum = attrs['volnum']) + "(#{volnum})" + else + nil + end + text = %(#{manname}#{suffix}) + if parent.document.basebackend? 'html' + parent.document.register :links, target + if manname == 'rustc' + html_target = 'https://doc.rust-lang.org/rustc/index.html' + elsif manname == 'rustdoc' + html_target = 'https://doc.rust-lang.org/rustdoc/index.html' + elsif manname == 'cargo' + html_target = 'commands/index.html' + else + html_target = %(commands/#{manname}.html) + end + %(#{(create_anchor parent, text, type: :link, target: html_target).render}) + elsif parent.document.backend == 'manpage' + %(\x1b\\fB#{manname}\x1b\\fP#{suffix}) + else + text + end + end +end + +# Creates a link to something in the cargo documentation. +# +# For HTML this creates a relative link (using mdbook's 0.1's base-style +# links). For the man page it gives a direct link to doc.rust-lang.org. +# +# Usage +# +# linkcargo:reference/manifest.html[the manifest] +# +class LinkCargoInlineMacro < Extensions::InlineMacroProcessor + use_dsl + + named :linkcargo + name_positional_attributes 'text' + + def process parent, target, attrs + text = attrs['text'] + if parent.document.basebackend? 'html' + parent.document.register :links, target + %(#{(create_anchor parent, text, type: :link, target: target).render}) + elsif parent.document.backend == 'manpage' + target = %(https://doc.rust-lang.org/cargo/#{target}) + %(#{(create_anchor parent, text, type: :link, target: target).render}) + else + %(#{text} <#{target}>) + end + end +end + +# Backticks in the manpage renderer use the CR font (courier), but in most +# cases in a terminal this doesn't look any different. Instead, use bold which +# should follow man page conventions better. +class MonoPostprocessor < Extensions::Postprocessor + def process document, output + if document.basebackend? 'manpage' + output = output.gsub(/\\f\(CR/, '\\fB') + end + output + end +end + +# General utility for converting text. 
Example: +# +# convert:lowercase[{somevar}] +class ConvertInlineMacro < Extensions::InlineMacroProcessor + use_dsl + + named :convert + name_positional_attributes 'text' + + def process parent, target, attrs + text = attrs['text'] + case target + when 'lowercase' + text.downcase + end + end +end + +Extensions.register :uri_schemes do + inline_macro ManInlineMacro + inline_macro LinkCargoInlineMacro + inline_macro ConvertInlineMacro + postprocessor MonoPostprocessor +end diff --git a/src/doc/book.toml b/src/doc/book.toml new file mode 100644 index 000000000..1f21e1e2e --- /dev/null +++ b/src/doc/book.toml @@ -0,0 +1,2 @@ +title = "The Cargo Book" +author = "Alex Crichton, Steve Klabnik and Carol Nichols, with Contributions from the Rust Community" diff --git a/src/doc/man/cargo-bench.adoc b/src/doc/man/cargo-bench.adoc new file mode 100644 index 000000000..879ee2021 --- /dev/null +++ b/src/doc/man/cargo-bench.adoc @@ -0,0 +1,142 @@ += cargo-bench(1) +:idprefix: cargo_bench_ +:doctype: manpage +:actionverb: Benchmark +:nouns: benchmarks + +== NAME + +cargo-bench - Execute benchmarks of a package + +== SYNOPSIS + +`cargo bench [_OPTIONS_] [BENCHNAME] [-- _BENCH-OPTIONS_]` + +== DESCRIPTION + +Compile and execute benchmarks. + +The benchmark filtering argument `BENCHNAME` and all the arguments following +the two dashes (`--`) are passed to the benchmark binaries and thus to +_libtest_ (rustc's built in unit-test and micro-benchmarking framework). If +you're passing arguments to both Cargo and the binary, the ones after `--` go +to the binary, the ones before go to Cargo. For details about libtest's +arguments see the output of `cargo bench -- --help`. As an example, this will +run only the benchmark named `foo` (and skip other similarly named benchmarks +like `foobar`): + + cargo bench -- foo --exact + +Benchmarks are built with the `--test` option to `rustc` which creates an +executable with a `main` function that automatically runs all functions +annotated with the `#[bench]` attribute. Cargo passes the `--bench` flag to +the test harness to tell it to run only benchmarks. + +The libtest harness may be disabled by setting `harness = false` in the target +manifest settings, in which case your code will need to provide its own `main` +function to handle running benchmarks. + +== OPTIONS + +=== Benchmark Options + +include::options-test.adoc[] + +=== Package Selection + +include::options-packages.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo bench` will build the +following targets of the selected packages: + +- lib — used to link with binaries and benchmarks +- bins (only if benchmark targets are built and required features are + available) +- lib as a benchmark +- bins as benchmarks +- benchmark targets + +The default behavior can be changed by setting the `bench` flag for the target +in the manifest settings. Setting examples to `bench = true` will build and +run the example as a benchmark. Setting targets to `bench = false` will stop +them from being benchmarked by default. Target selection options that take a +target by name ignore the `bench` flag and will always benchmark the given +target. + +include::options-targets.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +By default the Rust test harness hides output from benchmark execution to keep +results readable. 
Benchmark output can be recovered (e.g., for debugging) by +passing `--nocapture` to the benchmark binaries: + + cargo bench -- --nocapture + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +The `--jobs` argument affects the building of the benchmark executable but +does not affect how many threads are used when running the benchmarks. The +Rust test harness runs benchmarks serially in a single thread. + +include::options-jobs.adoc[] + +== PROFILES + +Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +linkcargo:reference/manifest.html#the-profile-sections[the reference] +for more details. + +Benchmarks are always built with the `bench` profile. Binary and lib targets +are built separately as benchmarks with the `bench` profile. Library targets +are built with the `release` profiles when linked to binaries and benchmarks. +Dependencies use the `release` profile. + +If you need a debug build of a benchmark, try building it with +man:cargo-build[1] which will use the `test` profile which is by default +unoptimized and includes debug information. You can then run the debug-enabled +benchmark manually. + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Build and execute all the benchmarks of the current package: + + cargo bench + +. Run only a specific benchmark within a specific benchmark target: + + cargo bench --bench bench_name -- modname::some_benchmark + +== SEE ALSO +man:cargo[1], man:cargo-test[1] diff --git a/src/doc/man/cargo-build.adoc b/src/doc/man/cargo-build.adoc new file mode 100644 index 000000000..2fc660f8c --- /dev/null +++ b/src/doc/man/cargo-build.adoc @@ -0,0 +1,94 @@ += cargo-build(1) +:idprefix: cargo_build_ +:doctype: manpage +:actionverb: Build + +== NAME + +cargo-build - Compile the current package + +== SYNOPSIS + +`cargo build [_OPTIONS_]` + +== DESCRIPTION + +Compile local packages and all of their dependencies. + +== OPTIONS + +=== Package Selection + +include::options-packages.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo build` will build all +binary and library targets of the selected packages. Binaries are skipped if +they have `required-features` that are missing. + +include::options-targets.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +*--out-dir* _DIRECTORY_:: + Copy final artifacts to this directory. ++ +This option is unstable and available only on the nightly channel and requires +the `-Z unstable-options` flag to enable. + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +*--build-plan*:: + Outputs a series of JSON messages to stdout that indicate the commands to + run the build. ++ +This option is unstable and available only on the nightly channel and requires +the `-Z unstable-options` flag to enable. 
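+
+For example, one possible way to inspect the plan on a nightly toolchain is
+`cargo +nightly build --build-plan -Z unstable-options`.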
+ +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Build the local package and all of its dependencies: + + cargo build + +. Build with optimizations: + + cargo build --release + +== SEE ALSO +man:cargo[1], man:cargo-rustc[1] diff --git a/src/doc/man/cargo-check.adoc b/src/doc/man/cargo-check.adoc new file mode 100644 index 000000000..c84b1dcef --- /dev/null +++ b/src/doc/man/cargo-check.adoc @@ -0,0 +1,87 @@ += cargo-check(1) +:idprefix: cargo_check_ +:doctype: manpage +:actionverb: Check + +== NAME + +cargo-check - Check the current package + +== SYNOPSIS + +`cargo check [_OPTIONS_]` + +== DESCRIPTION + +Check a local package and all of its dependencies for errors. This will +essentially compile the packages without performing the final step of code +generation, which is faster than running `cargo build`. The compiler will save +metadata files to disk so that future runs will reuse them if the source has +not been modified. + +== OPTIONS + +=== Package Selection + +include::options-packages.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo check` will check all +binary and library targets of the selected packages. Binaries are skipped if +they have `required-features` that are missing. + +include::options-targets.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +include::options-profile.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Check the local package for errors: + + cargo check + +. Check all targets, including unit tests: + + cargo check --all-targets --profile=test + +== SEE ALSO +man:cargo[1], man:cargo-build[1] diff --git a/src/doc/man/cargo-clean.adoc b/src/doc/man/cargo-clean.adoc new file mode 100644 index 000000000..08fdb76de --- /dev/null +++ b/src/doc/man/cargo-clean.adoc @@ -0,0 +1,76 @@ += cargo-clean(1) +:idprefix: cargo_clean_ +:doctype: manpage +:actionverb: Clean + +== NAME + +cargo-clean - Remove generated artifacts + +== SYNOPSIS + +`cargo clean [_OPTIONS_]` + +== DESCRIPTION + +Remove artifacts from the target directory that Cargo has generated in the +past. + +With no options, `cargo clean` will delete the entire target directory. + +== OPTIONS + +=== Package Selection + +When no packages are selected, all packages and all dependencies in the +workspace are cleaned. + +*-p* _SPEC_...:: +*--package* _SPEC_...:: + Clean only the specified packages. This flag may be specified + multiple times. See man:cargo-pkgid[1] for the SPEC format. + +=== Clean Options + +*--doc*:: + This option will cause `cargo clean` to remove only the `doc` directory in + the target directory. + +*--release*:: + Clean all artifacts that were built with the `release` or `bench` + profiles. 
+ +include::options-target-dir.adoc[] + +include::options-target-triple.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Remove the entire target directory: + + cargo clean + +. Remove only the release artifacts: + + cargo clean --release + +== SEE ALSO +man:cargo[1], man:cargo-build[1] diff --git a/src/doc/man/cargo-doc.adoc b/src/doc/man/cargo-doc.adoc new file mode 100644 index 000000000..99bfe75ff --- /dev/null +++ b/src/doc/man/cargo-doc.adoc @@ -0,0 +1,95 @@ += cargo-doc(1) +:idprefix: cargo_doc_ +:doctype: manpage +:actionverb: Document + +== NAME + +cargo-doc - Build a package's documentation + +== SYNOPSIS + +`cargo doc [_OPTIONS_]` + +== DESCRIPTION + +Build the documentation for the local package and all dependencies. The output +is placed in `target/doc` in rustdoc's usual format. + +== OPTIONS + +=== Documentation Options + +*--open*:: + Open the docs in a browser after building them. + +*--no-deps*:: + Do not build documentation for dependencies. + +*--document-private-items*:: + Include non-public items in the documentation. + +=== Package Selection + +include::options-packages.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo doc` will document all +binary and library targets of the selected package. The binary will be skipped +if its name is the same as the lib target. Binaries are skipped if they have +`required-features` that are missing. + +The default behavior can be changed by setting `doc = false` for the target in +the manifest settings. Using target selection options will ignore the `doc` +flag and will always document the given target. + +include::options-targets-lib-bin.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Build the local package documentation and its dependencies and output to +`target/doc`. + + cargo doc + +== SEE ALSO +man:cargo[1], man:cargo-rustdoc[1], man:rustdoc[1] diff --git a/src/doc/man/cargo-fetch.adoc b/src/doc/man/cargo-fetch.adoc new file mode 100644 index 000000000..8afdd6864 --- /dev/null +++ b/src/doc/man/cargo-fetch.adoc @@ -0,0 +1,57 @@ += cargo-fetch(1) +:idprefix: cargo_fetch_ +:doctype: manpage +:actionverb: Fetch + +== NAME + +cargo-fetch - Fetch dependencies of a package from the network + +== SYNOPSIS + +`cargo fetch [_OPTIONS_]` + +== DESCRIPTION + +If a `Cargo.lock` file is available, this command will ensure that all of the +git dependencies and/or registry dependencies are downloaded and locally +available. Subsequent Cargo commands never touch the network after a `cargo +fetch` unless the lock file changes. + +If the lock file is not available, then this command will generate the lock +file before fetching the dependencies. 
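+
+For example, one possible way to prefetch only the dependencies needed when
+cross-compiling (the target triple here is illustrative) is:
+
+    cargo fetch --target x86_64-unknown-linux-gnu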
+ +If `--target` is not specified, then all target dependencies are fetched. + +== OPTIONS + +=== Fetch options + +include::options-target-triple.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Fetch all dependencies: + + cargo fetch + +== SEE ALSO +man:cargo[1], man:cargo-update[1], man:cargo-generate-lockfile[1] diff --git a/src/doc/man/cargo-fix.adoc b/src/doc/man/cargo-fix.adoc new file mode 100644 index 000000000..2a7ff56a6 --- /dev/null +++ b/src/doc/man/cargo-fix.adoc @@ -0,0 +1,138 @@ += cargo-fix(1) +:idprefix: cargo_fix_ +:doctype: manpage +:actionverb: Fix + +== NAME + +cargo-fix - Automatically fix lint warnings reported by rustc + +== SYNOPSIS + +`cargo fix [_OPTIONS_]` + +== DESCRIPTION + +This Cargo subcommand will automatically take rustc's suggestions from +diagnostics like warnings and apply them to your source code. This is intended +to help automate tasks that rustc itself already knows how to tell you to fix! +The `cargo fix` subcommand is also being developed for the Rust 2018 edition +to provide code the ability to easily opt-in to the new edition without having +to worry about any breakage. + +Executing `cargo fix` will under the hood execute man:cargo-check[1]. Any warnings +applicable to your crate will be automatically fixed (if possible) and all +remaining warnings will be displayed when the check process is finished. For +example if you'd like to prepare for the 2018 edition, you can do so by +executing: + + cargo fix --edition + +which behaves the same as `cargo check --all-targets`. Similarly if you'd like +to fix code for different platforms you can do: + + cargo fix --edition --target x86_64-pc-windows-gnu + +or if your crate has optional features: + + cargo fix --edition --no-default-features --features foo + +If you encounter any problems with `cargo fix` or otherwise have any questions +or feature requests please don't hesitate to file an issue at +https://github.com/rust-lang/cargo + +== OPTIONS + +=== Fix options + +*--broken-code*:: + Fix code even if it already has compiler errors. This is useful if `cargo + fix` fails to apply the changes. It will apply the changes and leave the + broken code in the working directory for you to inspect and manually fix. + +*--edition*:: + Apply changes that will update the code to the latest edition. This will + not update the edition in the `Cargo.toml` manifest, which must be updated + manually. + +*--edition-idioms*:: + Apply suggestions that will update code to the preferred style for the + current edition. + +*--allow-no-vcs*:: + Fix code even if a VCS was not detected. + +*--allow-dirty*:: + Fix code even if the working directory has changes. + +*--allow-staged*:: + Fix code even if the working directory has staged changes. + +=== Package Selection + +include::options-packages.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo fix` will fix all targets +(`--all-targets` implied). Binaries are skipped if they have +`required-features` that are missing. 
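+
+For example, one possible way to restrict fixes to the library target only is:
+
+    cargo fix --lib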
+ +include::options-targets.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +include::options-profile.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Apply compiler suggestions to the local package: + + cargo fix + +. Convert a 2015 edition to 2018: + + cargo fix --edition + +. Apply suggested idioms for the current edition: + + cargo fix --edition-idioms + +== SEE ALSO +man:cargo[1], man:cargo-check[1] diff --git a/src/doc/man/cargo-generate-lockfile.adoc b/src/doc/man/cargo-generate-lockfile.adoc new file mode 100644 index 000000000..2b8915978 --- /dev/null +++ b/src/doc/man/cargo-generate-lockfile.adoc @@ -0,0 +1,49 @@ += cargo-generate-lockfile(1) +:idprefix: cargo_generate-lockfile_ +:doctype: manpage + +== NAME + +cargo-generate-lockfile - Generate the lockfile for a package + +== SYNOPSIS + +`cargo generate-lockfile [_OPTIONS_]` + +== DESCRIPTION + +This command will create the `Cargo.lock` lockfile for the current package or +workspace. If the lockfile already exists, it will be rebuilt if there are any +manifest changes or dependency updates. + +See also man:cargo-update[1] which is also capable of creating a `Cargo.lock` +lockfile and has more options for controlling update behavior. + +== OPTIONS + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Create or update the lockfile for the current package or workspace: + + cargo generate-lockfile + +== SEE ALSO +man:cargo[1], man:cargo-update[1] diff --git a/src/doc/man/cargo-help.adoc b/src/doc/man/cargo-help.adoc new file mode 100644 index 000000000..bcbb5ba34 --- /dev/null +++ b/src/doc/man/cargo-help.adoc @@ -0,0 +1,28 @@ += cargo-help(1) +:idprefix: cargo_help_ +:doctype: manpage + +== NAME + +cargo-help - Get help for a Cargo command + +== SYNOPSIS + +`cargo help [_SUBCOMMAND_]` + +== DESCRIPTION + +Prints a help message for the given command. + +== EXAMPLES + +. Get help for a command: + + cargo help build + +. Help is also available with the `--help` flag: + + cargo build --help + +== SEE ALSO +man:cargo[1] diff --git a/src/doc/man/cargo-init.adoc b/src/doc/man/cargo-init.adoc new file mode 100644 index 000000000..6df38bf68 --- /dev/null +++ b/src/doc/man/cargo-init.adoc @@ -0,0 +1,55 @@ += cargo-init(1) +:idprefix: cargo_init_ +:doctype: manpage + +== NAME + +cargo-init - Create a new Cargo package in an existing directory + +== SYNOPSIS + +`cargo init [_OPTIONS_] [_PATH_]` + +== DESCRIPTION + +This command will create a new Cargo manifest in the current directory. Give a +path as an argument to create in the given directory. + +If there are typically-named Rust source files already in the directory, those +will be used. 
If not, then a sample `src/main.rs` file will be created, or +`src/lib.rs` if `--lib` is passed. + +If the directory is not already in a VCS repository, then a new repository +is created (see `--vcs` below). + +include::description-new-authors.adoc[] + +See man:cargo-new[1] for a similar command which will create a new package in +a new directory. + +== OPTIONS + +=== Init Options + +include::options-new.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Create a binary Cargo package in the current directory: + + cargo init + +== SEE ALSO +man:cargo[1], man:cargo-new[1] diff --git a/src/doc/man/cargo-install.adoc b/src/doc/man/cargo-install.adoc new file mode 100644 index 000000000..94be115de --- /dev/null +++ b/src/doc/man/cargo-install.adoc @@ -0,0 +1,130 @@ += cargo-install(1) +:idprefix: cargo_install_ +:doctype: manpage +:actionverb: Install + +== NAME + +cargo-install - Build and install a Rust binary + +== SYNOPSIS + +[%hardbreaks] +`cargo install [_OPTIONS_] _CRATE_...` +`cargo install [_OPTIONS_] --path _PATH_` +`cargo install [_OPTIONS_] --git _URL_ [_CRATE_...]` +`cargo install [_OPTIONS_] --list` + +== DESCRIPTION + +This command manages Cargo's local set of installed binary crates. Only packages +which have `\[[bin]]` targets can be installed, and all binaries are installed into +the installation root's `bin` folder. + +include::description-install-root.adoc[] + +There are multiple sources from which a crate can be installed. The default +location is crates.io but the `--git` and `--path` flags can change this +source. If the source contains more than one package (such as crates.io or a +git repository with multiple crates) the _CRATE_ argument is required to +indicate which crate should be installed. + +Crates from crates.io can optionally specify the version they wish to install +via the `--version` flags, and similarly packages from git repositories can +optionally specify the branch, tag, or revision that should be installed. If a +crate has multiple binaries, the `--bin` argument can selectively install only +one of them, and if you'd rather install examples the `--example` argument can +be used as well. + +If the source is crates.io or `--git` then by default the crate will be built +in a temporary target directory. To avoid this, the target directory can be +specified by setting the `CARGO_TARGET_DIR` environment variable to a relative +path. In particular, this can be useful for caching build artifacts on +continuous integration systems. + +== OPTIONS + +=== Install Options + +*--vers* _VERSION_:: +*--version* _VERSION_:: + Specify a version to install from crates.io. + +*--git* _URL_:: + Git URL to install the specified crate from. + +*--branch* _BRANCH_:: + Branch to use when installing from git. + +*--tag* _TAG_:: + Tag to use when installing from git. + +*--rev* _SHA_:: + Specific commit to use when installing from git. + +*--path* _PATH_:: + Filesystem path to local crate to install. + +*--list*:: + List all installed packages and their versions. + +*-f*:: +*--force*:: + Force overwriting existing crates or binaries. This can be used to + reinstall or upgrade a crate. + + +*--bin* _NAME_...:: + Install only the specified binary. + +*--bins*:: + Install all binaries. + +*--example* _NAME_...:: + Install only the specified example. + +*--examples*:: + Install all examples. 
+ +*--root* _DIR_:: + Directory to install packages into. + +include::options-registry.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +*--debug*:: + Build with the `dev` profile instead of the `release` profile. + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Install a package from crates.io: + + cargo install ripgrep + +. Reinstall or upgrade a package: + + cargo install ripgrep --force + +== SEE ALSO +man:cargo[1], man:cargo-uninstall[1], man:cargo-search[1], man:cargo-publish[1] diff --git a/src/doc/man/cargo-locate-project.adoc b/src/doc/man/cargo-locate-project.adoc new file mode 100644 index 000000000..adfc7a39e --- /dev/null +++ b/src/doc/man/cargo-locate-project.adoc @@ -0,0 +1,46 @@ += cargo-locate-project(1) +:idprefix: cargo_locate-project_ +:doctype: manpage + +== NAME + +cargo-locate-project - Print a JSON representation of a Cargo.toml file's location + +== SYNOPSIS + +`cargo locate-project [_OPTIONS_]` + +== DESCRIPTION + +This command will print a JSON object to stdout with the full path to the +`Cargo.toml` manifest. + +See also man:cargo-metadata[1], which is capable of returning the path to a +workspace root. + +== OPTIONS + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Display the path to the manifest based on the current directory: + + cargo locate-project + +== SEE ALSO +man:cargo[1], man:cargo-metadata[1] diff --git a/src/doc/man/cargo-login.adoc b/src/doc/man/cargo-login.adoc new file mode 100644 index 000000000..17d0e66ad --- /dev/null +++ b/src/doc/man/cargo-login.adoc @@ -0,0 +1,51 @@ += cargo-login(1) +:idprefix: cargo_login_ +:doctype: manpage + +== NAME + +cargo-login - Save an API token from the registry locally + +== SYNOPSIS + +`cargo login [_OPTIONS_] [_TOKEN_]` + +== DESCRIPTION + +This command will save the API token to disk so that commands that require +authentication, such as man:cargo-publish[1], will be automatically +authenticated. The token is saved in `$CARGO_HOME/credentials`. `CARGO_HOME` +defaults to `.cargo` in your home directory. + +If the _TOKEN_ argument is not specified, it will be read from stdin. + +The API token for crates.io may be retrieved from https://crates.io/me. + +Take care to keep the token secret; it should not be shared with anyone else. + +== OPTIONS + +=== Login Options + +include::options-registry.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. 
Save the API token to disk: + + cargo login + +== SEE ALSO +man:cargo[1], man:cargo-publish[1] diff --git a/src/doc/man/cargo-metadata.adoc b/src/doc/man/cargo-metadata.adoc new file mode 100644 index 000000000..35e6926a8 --- /dev/null +++ b/src/doc/man/cargo-metadata.adoc @@ -0,0 +1,286 @@ += cargo-metadata(1) +:idprefix: cargo_metadata_ +:doctype: manpage +:source-highlighter: highlightjs + +== NAME + +cargo-metadata - Machine-readable metadata about the current package + +== SYNOPSIS + +`cargo metadata [_OPTIONS_]` + +== DESCRIPTION + +Output the resolved dependencies of a package, the concrete used versions +including overrides, in JSON to stdout. + +It is recommended to include the `--format-version` flag to future-proof +your code to ensure the output is in the format you are expecting. + +See the link:https://crates.io/crates/cargo_metadata[cargo_metadata crate] +for a Rust API for reading the metadata. + +== OUTPUT FORMAT + +The output has the following format: + +[source,javascript] +---- +{ + /* Array of all packages in the workspace. + It also includes all feature-enabled dependencies unless --no-deps is used. + */ + "packages": [ + { + /* The name of the package. */ + "name": "my-package", + /* The version of the package. */ + "version": "0.1.0", + /* The Package ID, a unique identifier for referring to the package. */ + "id": "my-package 0.1.0 (path+file:///path/to/my-package)", + /* The license value from the manifest, or null. */ + "license": "MIT/Apache-2.0", + /* The license-file value from the manifest, or null. */ + "license_file": "LICENSE", + /* The description value from the manifest, or null. */ + "description": "Package description.", + /* The source ID of the package. This represents where + a package is retrieved from. + This is null for path dependencies and workspace members. + For other dependencies, it is a string with the format: + - "registry+URL" for registry-based dependencies. + Example: "registry+https://github.com/rust-lang/crates.io-index" + - "git+URL" for git-based dependencies. + Example: "git+https://github.com/rust-lang/cargo?rev=5e85ba14aaa20f8133863373404cb0af69eeef2c#5e85ba14aaa20f8133863373404cb0af69eeef2c" + */ + "source": null, + /* Array of dependencies declared in the package's manifest. */ + "dependencies": [ + { + /* The name of the dependency. */ + "name": "bitflags", + /* The source ID of the dependency. May be null, see + description for the package source. + */ + "source": "registry+https://github.com/rust-lang/crates.io-index", + /* The version requirement for the dependency. + Dependencies without a version requirement have a value of "*". + */ + "req": "^1.0", + /* The dependency kind. + "dev", "build", or null for a normal dependency. + */ + "kind": null, + /* If the dependency is renamed, this is the new name for + the dependency as a string. null if it is not renamed. + */ + "rename": null, + /* Boolean of whether or not this is an optional dependency. */ + "optional": false, + /* Boolean of whether or not default features are enabled. */ + "uses_default_features": true, + /* Array of features enabled. */ + "features": [], + /* The target platform for the dependency. + null if not a target dependency. + */ + "target": "cfg(windows)", + /* A string of the URL of the registry this dependency is from. + If not specified or null, the dependency is from the default + registry (crates.io). + */ + "registry": null + } + ], + /* Array of Cargo targets. */ + "targets": [ + { + /* Array of target kinds. 
+ - lib targets list the `crate-type` values from the + manifest such as "lib", "rlib", "dylib", + "proc-macro", etc. (default ["lib"]) + - binary is ["bin"] + - example is ["example"] + - integration test is ["test"] + - benchmark is ["bench"] + - build script is ["custom-build"] + */ + "kind": [ + "bin" + ], + /* Array of crate types. + - lib and example libraries list the `crate-type` values + from the manifest such as "lib", "rlib", "dylib", + "proc-macro", etc. (default ["lib"]) + - all other target kinds are ["bin"] + */ + "crate_types": [ + "bin" + ], + /* The name of the target. */ + "name": "my-package", + /* Absolute path to the root source file of the target. */ + "src_path": "/path/to/my-package/src/main.rs", + /* The Rust edition of the target. + Defaults to the package edition. + */ + "edition": "2018", + /* Array of required features. + This property is not included if no required features are set. + */ + "required-features": ["feat1"] + } + ], + /* Set of features defined for the package. + Each feature maps to an array of features or dependencies it + enables. + */ + "features": { + "default": [ + "feat1" + ], + "feat1": [], + "feat2": [] + }, + /* Absolute path to this package's manifest. */ + "manifest_path": "/path/to/my-package/Cargo.toml", + /* Package metadata. + This is null if no metadata is specified. + */ + "metadata": { + "docs": { + "rs": { + "all-features": true + } + } + }, + /* Array of authors from the manifest. + Empty array if no authors specified. + */ + "authors": [ + "Jane Doe " + ], + /* Array of categories from the manifest. */ + "categories": [ + "command-line-utilities" + ], + /* Array of keywords from the manifest. */ + "keywords": [ + "cli" + ], + /* The readme value from the manifest or null if not specified. */ + "readme": "README.md", + /* The repository value from the manifest or null if not specified. */ + "repository": "https://github.com/rust-lang/cargo", + /* The default edition of the package. + Note that individual targets may have different editions. + */ + "edition": "2018", + /* Optional string that is the name of a native library the package + is linking to. + */ + "links": null, + } + ], + /* Array of members of the workspace. + Each entry is the Package ID for the package. + */ + "workspace_members": [ + "my-package 0.1.0 (path+file:///path/to/my-package)", + ], + /* The resolved dependency graph, with the concrete versions and features + selected. The set depends on the enabled features. + This is null if --no-deps is specified. + */ + "resolve": { + /* Array of nodes within the dependency graph. + Each node is a package. + */ + "nodes": [ + { + /* The Package ID of this node. */ + "id": "my-package 0.1.0 (path+file:///path/to/my-package)", + /* The dependencies of this package, an array of Package IDs. */ + "dependencies": [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" + ], + /* The dependencies of this package. This is an alternative to + "dependencies" which contains additional information. In + particular, this handles renamed dependencies. + */ + "deps": [ + { + /* The name of the dependency. + If this is a renamed dependency, this is the new + name. + */ + "name": "bitflags", + /* The Package ID of the dependency. */ + "pkg": "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" + } + ], + /* Array of features enabled on this package. */ + "features": [ + "default" + ] + } + ], + /* The root package of the workspace. + This is null if this is a virtual workspace. 
Otherwise it is + the Package ID of the root package. + */ + "root": "my-package 0.1.0 (path+file:///path/to/my-package)" + }, + /* The absolute path to the build directory where Cargo places its output. */ + "target_directory": "/path/to/my-package/target", + /* The version of the schema for this metadata structure. + This will be changed if incompatible changes are ever made. + */ + "version": 1, + /* The absolute path to the root of the workspace. */ + "workspace_root": "/path/to/my-package" +} +---- + +== OPTIONS + +=== Output Options + +*--no-deps*:: + Output information only about the workspace members and don't fetch + dependencies. + +*--format-version* _VERSION_:: + Specify the version of the output format to use. Currently `1` is the only + possible value. + +include::options-features.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Output JSON about the current package: + + cargo metadata --format-version=1 + +== SEE ALSO +man:cargo[1] diff --git a/src/doc/man/cargo-new.adoc b/src/doc/man/cargo-new.adoc new file mode 100644 index 000000000..6587a3789 --- /dev/null +++ b/src/doc/man/cargo-new.adoc @@ -0,0 +1,50 @@ += cargo-new(1) +:idprefix: cargo_new_ +:doctype: manpage + +== NAME + +cargo-new - Create a new Cargo package + +== SYNOPSIS + +`cargo new [_OPTIONS_] _PATH_` + +== DESCRIPTION + +This command will create a new Cargo package in the given directory. This +includes a simple template with a `Cargo.toml` manifest, sample source file, +and a VCS ignore file. If the directory is not already in a VCS repository, +then a new repository is created (see `--vcs` below). + +include::description-new-authors.adoc[] + +See man:cargo-init[1] for a similar command which will create a new manifest +in an existing directory. + +== OPTIONS + +=== New Options + +include::options-new.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Create a binary Cargo package in the given directory: + + cargo new foo + +== SEE ALSO +man:cargo[1], man:cargo-init[1] diff --git a/src/doc/man/cargo-owner.adoc b/src/doc/man/cargo-owner.adoc new file mode 100644 index 000000000..63e6e309d --- /dev/null +++ b/src/doc/man/cargo-owner.adoc @@ -0,0 +1,80 @@ += cargo-owner(1) +:idprefix: cargo_owner_ +:doctype: manpage + +== NAME + +cargo-owner - Manage the owners of a crate on the registry + +== SYNOPSIS + +[%hardbreaks] +`cargo owner [_OPTIONS_] --add _LOGIN_ [_CRATE_]` +`cargo owner [_OPTIONS_] --remove _LOGIN_ [_CRATE_]` +`cargo owner [_OPTIONS_] --list [_CRATE_]` + +== DESCRIPTION + +This command will modify the owners for a crate on the registry. Owners of a +crate can upload new versions and yank old versions. Non-team owners can also +modify the set of owners, so take care! + +This command requires you to be authenticated with either the `--token` option +or using man:cargo-login[1]. + +If the crate name is not specified, it will use the package name from the +current directory. + +See linkcargo:reference/publishing.html#cargo-owner[the reference] for more +information about owners and publishing. 
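+ +For example, assuming you are already authenticated, a user and a crates.io team (written in the `github:org:team` form that crates.io accepts for team owners) could be invited as owners of the current package with: + + cargo owner --add username + cargo owner --add github:rust-lang:core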
+ +== OPTIONS + +=== Owner Options + +*-a*:: +*--add* _LOGIN_...:: + Invite the given user or team as an owner. + +*-r*:: +*--remove* _LOGIN_...:: + Remove the given user or team as an owner. + +*-l*:: +*--list*:: + List owners of a crate. + +include::options-token.adoc[] + +include::options-index.adoc[] + +include::options-registry.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. List owners of a package: + + cargo owner --list foo + +. Invite an owner to a package: + + cargo owner --add username foo + +. Remove an owner from a package: + + cargo owner --remove username foo + +== SEE ALSO +man:cargo[1], man:cargo-login[1], man:cargo-publish[1] diff --git a/src/doc/man/cargo-package.adoc b/src/doc/man/cargo-package.adoc new file mode 100644 index 000000000..26eed2b13 --- /dev/null +++ b/src/doc/man/cargo-package.adoc @@ -0,0 +1,95 @@ += cargo-package(1) +:idprefix: cargo_package_ +:doctype: manpage +:actionverb: Package + +== NAME + +cargo-package - Assemble the local package into a distributable tarball + +== SYNOPSIS + +`cargo package [_OPTIONS_]` + +== DESCRIPTION + +This command will create a distributable, compressed `.crate` file with the +source code of the package in the current directory. The resulting file will +be stored in the `target/package` directory. This performs the following +steps: + +. Load and check the current workspace, performing some basic checks. + - Path dependencies are not allowed unless they have a version key. Cargo + will ignore the path key for dependencies in published packages. +. Create the compressed `.crate` file. + - The original `Cargo.toml` file is rewritten and normalized. + - `[patch]`, `[replace]`, and `[workspace]` sections are removed from the + manifest. + - A `.cargo_vcs_info.json` file is included that contains information + about the current VCS checkout hash if available (not included with + `--allow-dirty`). +. Extract the `.crate` file and build it to verify that it can be built. +. Check that build scripts did not modify any source files. + +The list of files included can be controlled with the `include` and `exclude` +fields in the manifest. + +See linkcargo:reference/publishing.html[the reference] for more details about +packaging and publishing. + +== OPTIONS + +=== Package Options + +*-l*:: +*--list*:: + Print files included in a package without making one. + +*--no-verify*:: + Don't verify the contents by building them. + +*--no-metadata*:: + Ignore warnings about a lack of human-usable metadata (such as the + description or the license). + +*--allow-dirty*:: + Allow working directories with uncommitted VCS changes to be packaged. + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-target-dir.adoc[] + +include::options-features.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Create a compressed `.crate` file of the current package: + + cargo package
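+ +. List the files that would be included in the `.crate` file, without creating it: + + cargo package --list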
+ +== SEE ALSO +man:cargo[1], man:cargo-publish[1] diff --git a/src/doc/man/cargo-pkgid.adoc b/src/doc/man/cargo-pkgid.adoc new file mode 100644 index 000000000..98ff9dd9d --- /dev/null +++ b/src/doc/man/cargo-pkgid.adoc @@ -0,0 +1,94 @@ += cargo-pkgid(1) +:idprefix: cargo_pkgid_ +:doctype: manpage + +== NAME + +cargo-pkgid - Print a fully qualified package specification + +== SYNOPSIS + +`cargo pkgid [_OPTIONS_] [_SPEC_]` + +== DESCRIPTION + +Given a _SPEC_ argument, print out the fully qualified package ID specifier +for a package or dependency in the current workspace. This command will +generate an error if _SPEC_ is ambiguous as to which package it refers to in +the dependency graph. If no _SPEC_ is given, then the specifier for the local +package is printed. + +This command requires that a lockfile is available and dependencies have been +fetched. + +A package specifier consists of a name, version, and source URL. You are +allowed to use partial specifiers to succinctly match a specific package as +long as it matches only one package. The format of a _SPEC_ can be one of the +following: + +[%autowidth] +.SPEC Query Format +|=== +|SPEC Structure |Example SPEC + +|__NAME__ +|`bitflags` + +|__NAME__``:``__VERSION__ +|`bitflags:1.0.4` + +|__URL__ +|`https://github.com/rust-lang/cargo` + +|__URL__``#``__VERSION__ +|`https://github.com/rust-lang/cargo#0.33.0` + +|__URL__``#``__NAME__ +|`https://github.com/rust-lang/crates.io-index#bitflags` + +|__URL__``#``__NAME__``:``__VERSION__ +|`https://github.com/rust-lang/cargo#crates-io:0.21.0` +|=== + +== OPTIONS + +=== Package Selection + +*-p* _SPEC_:: +*--package* _SPEC_:: + Get the package ID for the given package instead of the current package. + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Retrieve the package specification for the `foo` package: + + cargo pkgid foo + +. Retrieve the package specification for version 1.0.0 of `foo`: + + cargo pkgid foo:1.0.0 + +. Retrieve the package specification for `foo` from crates.io: + + cargo pkgid https://github.com/rust-lang/crates.io-index#foo + +== SEE ALSO +man:cargo[1], man:cargo-generate-lockfile[1], man:cargo-metadata[1] diff --git a/src/doc/man/cargo-publish.adoc b/src/doc/man/cargo-publish.adoc new file mode 100644 index 000000000..ae247f486 --- /dev/null +++ b/src/doc/man/cargo-publish.adoc @@ -0,0 +1,90 @@ += cargo-publish(1) +:idprefix: cargo_publish_ +:doctype: manpage +:actionverb: Publish + +== NAME + +cargo-publish - Upload a package to the registry + +== SYNOPSIS + +`cargo publish [_OPTIONS_]` + +== DESCRIPTION + +This command will create a distributable, compressed `.crate` file with the +source code of the package in the current directory and upload it to a +registry. The default registry is https://crates.io. This performs the +following steps: + +. Perform a few checks, including: + - Checking the `package.publish` key in the manifest for restrictions on which + registries you are allowed to publish to. +. Create a `.crate` file by following the steps in man:cargo-package[1]. +. Upload the crate to the registry. Note that the server will perform + additional checks on the crate. 
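+ +The steps prior to the final upload can be exercised without publishing anything by passing the `--dry-run` flag described below: + + cargo publish --dry-run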
+ +This command requires you to be authenticated with either the `--token` option +or using man:cargo-login[1]. + +See linkcargo:reference/publishing.html[the reference] for more details about +packaging and publishing. + +== OPTIONS + +=== Publish Options + +*--dry-run*:: + Perform all checks without uploading. + +include::options-token.adoc[] + +*--no-verify*:: + Don't verify the contents by building them. + +*--allow-dirty*:: + Allow working directories with uncommitted VCS changes to be packaged. + +include::options-index.adoc[] + +include::options-registry.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-target-dir.adoc[] + +include::options-features.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Publish the current package: + + cargo publish + +== SEE ALSO +man:cargo[1], man:cargo-package[1], man:cargo-login[1] diff --git a/src/doc/man/cargo-run.adoc b/src/doc/man/cargo-run.adoc new file mode 100644 index 000000000..21bec850c --- /dev/null +++ b/src/doc/man/cargo-run.adoc @@ -0,0 +1,89 @@ += cargo-run(1) +:idprefix: cargo_run_ +:doctype: manpage +:actionverb: Run + +== NAME + +cargo-run - Run the current package + +== SYNOPSIS + +`cargo run [_OPTIONS_] [-- _ARGS_]` + +== DESCRIPTION + +Run a binary or example of the local package. + +All the arguments following the two dashes (`--`) are passed to the binary to +run. If you're passing arguments to both Cargo and the binary, the ones after +`--` go to the binary, the ones before go to Cargo. + +== OPTIONS + +=== Package Selection + +include::options-package.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo run` will run the binary +target. If there are multiple binary targets, you must pass a target flag to +choose one. + +*--bin* _NAME_:: + Run the specified binary. + +*--example* _NAME_:: + Run the specified example. + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Build the local package and run its main target (assuming only one binary): + + cargo run + +. 
Run an example with extra arguments: + + cargo run --example exname -- --exoption exarg1 exarg2 + +== SEE ALSO +man:cargo[1], man:cargo-build[1] diff --git a/src/doc/man/cargo-rustc.adoc b/src/doc/man/cargo-rustc.adoc new file mode 100644 index 000000000..7ba60cb19 --- /dev/null +++ b/src/doc/man/cargo-rustc.adoc @@ -0,0 +1,94 @@ += cargo-rustc(1) +:idprefix: cargo_rustc_ +:doctype: manpage +:actionverb: Build + +== NAME + +cargo-rustc - Compile the current package, and pass extra options to the compiler + +== SYNOPSIS + +`cargo rustc [_OPTIONS_] [-- _ARGS_]` + +== DESCRIPTION + +The specified target for the current package (or package specified by `-p` if +provided) will be compiled along with all of its dependencies. The specified +_ARGS_ will all be passed to the final compiler invocation, not to any of the +dependencies. Note that the compiler will still unconditionally receive +arguments such as `-L`, `--extern`, and `--crate-type`, and the specified +_ARGS_ will simply be added to the compiler invocation. + +See https://doc.rust-lang.org/rustc/index.html for documentation on rustc +flags. + +include::description-one-target.adoc[] +To pass flags to all compiler processes spawned by Cargo, use the `RUSTFLAGS` +environment variable or the `build.rustflags` +linkcargo:reference/config.html[config value]. + +== OPTIONS + +=== Package Selection + +include::options-package.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo rustc` will build all +binary and library targets of the selected package. + +include::options-targets.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Check if your package (not including dependencies) uses unsafe code: + + cargo rustc --lib -- -D unsafe-code + +. Try an experimental flag on the nightly compiler, such as this one, which prints + the size of every type: + + cargo rustc --lib -- -Z print-type-sizes + +== SEE ALSO +man:cargo[1], man:cargo-build[1], man:rustc[1] diff --git a/src/doc/man/cargo-rustdoc.adoc b/src/doc/man/cargo-rustdoc.adoc new file mode 100644 index 000000000..6d7fea761 --- /dev/null +++ b/src/doc/man/cargo-rustdoc.adoc @@ -0,0 +1,96 @@ += cargo-rustdoc(1) +:idprefix: cargo_rustdoc_ +:doctype: manpage +:actionverb: Document + +== NAME + +cargo-rustdoc - Build a package's documentation, using specified custom flags + +== SYNOPSIS + +`cargo rustdoc [_OPTIONS_] [-- _ARGS_]` + +== DESCRIPTION + +The specified target for the current package (or package specified by `-p` if +provided) will be documented with the specified _ARGS_ being passed to the +final rustdoc invocation. Dependencies will not be documented as part of this +command. Note that rustdoc will still unconditionally receive arguments such +as `-L`, `--extern`, and `--crate-type`, and the specified _ARGS_ will simply +be added to the rustdoc invocation. + +See https://doc.rust-lang.org/rustdoc/index.html for documentation on rustdoc +flags. 
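+ +As an example of forwarding a flag, the following asks rustdoc to also document private items (`--document-private-items` is a rustdoc flag, used here purely as an illustration): + + cargo rustdoc --lib -- --document-private-items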
+ +include::description-one-target.adoc[] +To pass flags to all rustdoc processes spawned by Cargo, use the +`RUSTDOCFLAGS` environment variable or the `build.rustdocflags` configuration +option. + +== OPTIONS + +=== Documentation Options + +*--open*:: + Open the docs in a browser after building them. + +=== Package Selection + +include::options-package.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo rustdoc` will document all +binary and library targets of the selected package. The binary will be skipped +if its name is the same as the lib target. Binaries are skipped if they have +`required-features` that are missing. + +include::options-targets.adoc[] + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Build documentation with custom CSS included from a given file: + + cargo rustdoc --lib -- --extend-css extra.css + +== SEE ALSO +man:cargo[1], man:cargo-doc[1], man:rustdoc[1] diff --git a/src/doc/man/cargo-search.adoc b/src/doc/man/cargo-search.adoc new file mode 100644 index 000000000..4d5128592 --- /dev/null +++ b/src/doc/man/cargo-search.adoc @@ -0,0 +1,49 @@ += cargo-search(1) +:idprefix: cargo_search_ +:doctype: manpage + +== NAME + +cargo-search - Search packages in crates.io + +== SYNOPSIS + +`cargo search [_OPTIONS_] [_QUERY_...]` + +== DESCRIPTION + +This performs a textual search for crates on https://crates.io. The matching +crates will be displayed along with their description in TOML format suitable +for copying into a `Cargo.toml` manifest. + +== OPTIONS + +=== Search Options + +*--limit* _LIMIT_:: + Limit the number of results (default: 10, max: 100). + +include::options-index.adoc[] + +include::options-registry.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Search for a package from crates.io: + + cargo search serde + +== SEE ALSO +man:cargo[1], man:cargo-install[1], man:cargo-publish[1] diff --git a/src/doc/man/cargo-test.adoc b/src/doc/man/cargo-test.adoc new file mode 100644 index 000000000..7f1985265 --- /dev/null +++ b/src/doc/man/cargo-test.adoc @@ -0,0 +1,152 @@ += cargo-test(1) +:idprefix: cargo_test_ +:doctype: manpage +:actionverb: Test +:nouns: tests + +== NAME + +cargo-test - Execute unit and integration tests of a package + +== SYNOPSIS + +`cargo test [_OPTIONS_] [TESTNAME] [-- _TEST-OPTIONS_]` + +== DESCRIPTION + +Compile and execute unit and integration tests. + +The test filtering argument `TESTNAME` and all the arguments following the two +dashes (`--`) are passed to the test binaries and thus to _libtest_ (rustc's +built in unit-test and micro-benchmarking framework). If you're passing +arguments to both Cargo and the binary, the ones after `--` go to the binary, +the ones before go to Cargo. 
For details about libtest's arguments see the +output of `cargo test -- --help`. As an example, this will run all tests with +`foo` in their name on 3 threads in parallel: + + cargo test foo -- --test-threads 3 + +Tests are built with the `--test` option to `rustc` which creates an +executable with a `main` function that automatically runs all functions +annotated with the `\#[test]` attribute in multiple threads. `#[bench]` +annotated functions will also be run with one iteration to verify that they +are functional. + +The libtest harness may be disabled by setting `harness = false` in the target +manifest settings, in which case your code will need to provide its own `main` +function to handle running tests. + +Documentation tests are also run by default, which is handled by `rustdoc`. It +extracts code samples from documentation comments and executes them. See the +link:https://doc.rust-lang.org/rustdoc/[rustdoc book] for more information on +writing doc tests. + +== OPTIONS + +=== Test Options + +include::options-test.adoc[] + +=== Package Selection + +include::options-packages.adoc[] + +=== Target Selection + +When no target selection options are given, `cargo test` will build the +following targets of the selected packages: + +- lib — used to link with binaries, examples, integration tests, and doc tests +- bins (only if integration tests are built and required features are + available) +- examples — to ensure they compile +- lib as a unit test +- bins as unit tests +- integration tests +- doc tests for the lib target + +The default behavior can be changed by setting the `test` flag for the target +in the manifest settings. Setting examples to `test = true` will build and run +the example as a test. Setting targets to `test = false` will stop them from +being tested by default. Target selection options that take a target by name +ignore the `test` flag and will always test the given target. + +Doc tests for libraries may be disabled by setting `doctest = false` for the +library in the manifest. + +include::options-targets.adoc[] + +*--doc*:: + Test only the library's documentation. This cannot be mixed with other + target options. + +include::options-features.adoc[] + +=== Compilation Options + +include::options-target-triple.adoc[] + +include::options-release.adoc[] + +=== Output Options + +include::options-target-dir.adoc[] + +=== Display Options + +By default the Rust test harness hides output from test execution to keep +results readable. Test output can be recovered (e.g., for debugging) by passing +`--nocapture` to the test binaries: + + cargo test -- --nocapture + +include::options-display.adoc[] + +include::options-message-format.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +=== Miscellaneous Options + +The `--jobs` argument affects the building of the test executable but does not +affect how many threads are used when running the tests. The Rust test harness +includes an option to control the number of threads used: + + cargo test -j 2 -- --test-threads=2 + +include::options-jobs.adoc[] + +include::section-profiles.adoc[] + +Unit tests are separate executable artifacts which use the `test`/`bench` +profiles. 
Example targets are built the same as with `cargo build` (using the +`dev`/`release` profiles) unless you are building them with the test harness +(by setting `test = true` in the manifest or using the `--example` flag) in +which case they use the `test`/`bench` profiles. Library targets are built +with the `dev`/`release` profiles when linked to an integration test, binary, +or doctest. + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Execute all the unit and integration tests of the current package: + + cargo test + +. Run only a specific test within a specific integration test: + + cargo test --test int_test_name -- modname::test_name + +== SEE ALSO +man:cargo[1], man:cargo-bench[1] diff --git a/src/doc/man/cargo-uninstall.adoc b/src/doc/man/cargo-uninstall.adoc new file mode 100644 index 000000000..b75a10401 --- /dev/null +++ b/src/doc/man/cargo-uninstall.adoc @@ -0,0 +1,57 @@ += cargo-uninstall(1) +:idprefix: cargo_uninstall_ +:doctype: manpage + +== NAME + +cargo-uninstall - Remove a Rust binary + +== SYNOPSIS + +`cargo uninstall [_OPTIONS_] [_SPEC_...]` + +== DESCRIPTION + +This command removes a package installed with man:cargo-install[1]. The _SPEC_ +argument is a package ID specification of the package to remove (see +man:cargo-pkgid[1]). + +By default all binaries are removed for a crate, but the `--bin` and +`--example` flags can be used to remove only particular binaries. + +include::description-install-root.adoc[] + +== OPTIONS + +=== Install Options + +*-p*:: +*--package* _SPEC_...:: + Package to uninstall. + +*--bin* _NAME_...:: + Only uninstall the binary _NAME_. + +*--root* _DIR_:: + Directory to uninstall packages from. + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Uninstall a previously installed package: + + cargo uninstall ripgrep + +== SEE ALSO +man:cargo[1], man:cargo-install[1] diff --git a/src/doc/man/cargo-update.adoc b/src/doc/man/cargo-update.adoc new file mode 100644 index 000000000..c8a527435 --- /dev/null +++ b/src/doc/man/cargo-update.adoc @@ -0,0 +1,81 @@ += cargo-update(1) +:idprefix: cargo_update_ +:doctype: manpage + +== NAME + +cargo-update - Update dependencies as recorded in the local lock file + +== SYNOPSIS + +`cargo update [_OPTIONS_]` + +== DESCRIPTION + +This command will update dependencies in the `Cargo.lock` file to their latest +versions. It requires that the `Cargo.lock` file already exists as generated +by commands such as man:cargo-build[1] or man:cargo-generate-lockfile[1]. + +== OPTIONS + +=== Update Options + +*-p* _SPEC_...:: +*--package* _SPEC_...:: + Update only the specified packages. This flag may be specified + multiple times. See man:cargo-pkgid[1] for the SPEC format. ++ +If packages are specified with the `-p` flag, then a conservative update of +the lockfile will be performed. This means that only the dependency specified +by SPEC will be updated. Its transitive dependencies will be updated only if +SPEC cannot be updated without updating dependencies. All other dependencies +will remain locked at their currently recorded versions. ++ +If `-p` is not specified, all dependencies are updated. + +*--aggressive*:: + When used with `-p`, dependencies of _SPEC_ are forced to update as well. + Cannot be used with `--precise`. 
+ +*--precise* _PRECISE_:: + When used with `-p`, allows you to specify the exact version to + set the package to. If the package comes from a git repository, this can + be a git revision (such as a SHA hash or tag). + +*--dry-run*:: + Displays what would be updated, but doesn't actually write the lockfile. + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Update all dependencies in the lockfile: + + cargo update + +. Update only specific dependencies: + + cargo update -p foo -p bar + +. Set a dependency to a specific version: + + cargo update -p foo --precise 1.2.3 + +== SEE ALSO +man:cargo[1], man:cargo-generate-lockfile[1] diff --git a/src/doc/man/cargo-verify-project.adoc b/src/doc/man/cargo-verify-project.adoc new file mode 100644 index 000000000..7b963f8c5 --- /dev/null +++ b/src/doc/man/cargo-verify-project.adoc @@ -0,0 +1,57 @@ += cargo-verify-project(1) +:idprefix: cargo_verify-project_ +:doctype: manpage + +== NAME + +cargo-verify-project - Check correctness of crate manifest + +== SYNOPSIS + +`cargo verify-project [_OPTIONS_]` + +== DESCRIPTION + +This command will parse the local manifest and check its validity. It emits a +JSON object with the result. A successful validation will display: + + {"success":"true"} + +An invalid workspace will display: + + {"invalid":"human-readable error message"} + +== OPTIONS + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-manifest-path.adoc[] + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +== Exit Status + +0:: + The workspace is OK. + +1:: + The workspace is invalid. + +== EXAMPLES + +. Check the current workspace for errors: + + cargo verify-project + +== SEE ALSO +man:cargo[1], man:cargo-package[1] diff --git a/src/doc/man/cargo-version.adoc b/src/doc/man/cargo-version.adoc new file mode 100644 index 000000000..4c3bb7a1b --- /dev/null +++ b/src/doc/man/cargo-version.adoc @@ -0,0 +1,39 @@ += cargo-version(1) +:idprefix: cargo_version_ +:doctype: manpage + +== NAME + +cargo-version - Show version information + +== SYNOPSIS + +`cargo version [_OPTIONS_]` + +== DESCRIPTION + +Displays the version of Cargo. + +== OPTIONS + +*-v*:: +*--verbose*:: + Display additional version information. + +== EXAMPLES + +. Display the version: + + cargo version + +. The version is also available via flags: + + cargo --version + cargo -V + +. Display extra version information: + + cargo -Vv + +== SEE ALSO +man:cargo[1] diff --git a/src/doc/man/cargo-yank.adoc b/src/doc/man/cargo-yank.adoc new file mode 100644 index 000000000..99f430826 --- /dev/null +++ b/src/doc/man/cargo-yank.adoc @@ -0,0 +1,64 @@ += cargo-yank(1) +:idprefix: cargo_yank_ +:doctype: manpage + +== NAME + +cargo-yank - Remove a pushed crate from the index + +== SYNOPSIS + +`cargo yank [_OPTIONS_] --vers _VERSION_ [_CRATE_]` + +== DESCRIPTION + +The yank command removes a previously published crate's version from the +server's index. This command does not delete any data, and the crate will +still be available for download via the registry's download link. + +Note that existing crates locked to a yanked version will still be able to +download the yanked version to use it. 
Cargo will, however, not allow any new +crates to be locked to any yanked version. + +This command requires you to be authenticated with either the `--token` option +or using man:cargo-login[1]. + +If the crate name is not specified, it will use the package name from the +current directory. + +== OPTIONS + +=== Yank Options + +*--vers* _VERSION_:: + The version to yank or un-yank. + +*--undo*:: + Undo a yank, putting a version back into the index. + +include::options-token.adoc[] + +include::options-index.adoc[] + +include::options-registry.adoc[] + +=== Display Options + +include::options-display.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== EXAMPLES + +. Yank a crate from the index: + + cargo yank --vers 1.0.7 foo + +== SEE ALSO +man:cargo[1], man:cargo-login[1], man:cargo-publish[1] diff --git a/src/doc/man/cargo.adoc b/src/doc/man/cargo.adoc new file mode 100644 index 000000000..8fbbac8dd --- /dev/null +++ b/src/doc/man/cargo.adoc @@ -0,0 +1,217 @@ += cargo(1) +:doctype: manpage + +== NAME + +cargo - The Rust package manager + +== SYNOPSIS + +[%hardbreaks] +`cargo [_OPTIONS_] _COMMAND_ [_ARGS_]` +`cargo [_OPTIONS_] --version` +`cargo [_OPTIONS_] --list` +`cargo [_OPTIONS_] --help` +`cargo [_OPTIONS_] --explain _CODE_` + +== DESCRIPTION + +This program is a package manager and build tool for the Rust language, +available at https://rust-lang.org. + +== COMMANDS + +=== Build Commands + +man:cargo-bench[1]:: + Execute benchmarks of a package. + +man:cargo-build[1]:: + Compile a package. + +man:cargo-check[1]:: + Check a local package and all of its dependencies for errors. + +man:cargo-clean[1]:: + Remove artifacts that Cargo has generated in the past. + +man:cargo-doc[1]:: + Build a package's documentation. + +man:cargo-fetch[1]:: + Fetch dependencies of a package from the network. + +man:cargo-fix[1]:: + Automatically fix lint warnings reported by rustc. + +man:cargo-run[1]:: + Run a binary or example of the local package. + +man:cargo-rustc[1]:: + Compile a package, and pass extra options to the compiler. + +man:cargo-rustdoc[1]:: + Build a package's documentation, using specified custom flags. + +man:cargo-test[1]:: + Execute unit and integration tests of a package. + +=== Manifest Commands + +man:cargo-generate-lockfile[1]:: + Generate `Cargo.lock` for a project. + +man:cargo-locate-project[1]:: + Print a JSON representation of a `Cargo.toml` file's location. + +man:cargo-metadata[1]:: + Output the resolved dependencies of a package, the concrete used versions + including overrides, in machine-readable format. + +man:cargo-pkgid[1]:: + Print a fully qualified package specification. + +man:cargo-update[1]:: + Update dependencies as recorded in the local lock file. + +man:cargo-verify-project[1]:: + Check correctness of crate manifest. + +=== Package Commands + +man:cargo-init[1]:: + Create a new Cargo package in an existing directory. + +man:cargo-install[1]:: + Build and install a Rust binary. + +man:cargo-new[1]:: + Create a new Cargo package. + +man:cargo-search[1]:: + Search packages in crates.io. + +man:cargo-uninstall[1]:: + Remove a Rust binary. + +=== Publishing Commands + +man:cargo-login[1]:: + Save an API token from the registry locally. + +man:cargo-owner[1]:: + Manage the owners of a crate on the registry. + +man:cargo-package[1]:: + Assemble the local package into a distributable tarball. + +man:cargo-publish[1]:: + Upload a package to the registry. 
+ +man:cargo-yank[1]:: + Remove a pushed crate from the index. + +=== General Commands + +man:cargo-help[1]:: + Display help information about Cargo. + +man:cargo-version[1]:: + Show version information. + +== OPTIONS + +=== Special Options + +*-V*:: +*--version*:: + Print version info and exit. If used with `--verbose`, prints extra + information. + +*--list*:: + List all installed Cargo subcommands. If used with `--verbose`, prints + extra information. + +*--explain _CODE_*:: + Run `rustc --explain CODE` which will print out a detailed explanation of + an error message (for example, `E0004`). + +=== Display Options + +include::options-display.adoc[] + +=== Manifest Options + +include::options-locked.adoc[] + +=== Common Options + +include::options-common.adoc[] + +include::section-environment.adoc[] + +include::section-exit-status.adoc[] + +== FILES + +`~/.cargo/`:: + Default location for Cargo's "home" directory where it stores various + files. The location can be changed with the `CARGO_HOME` environment + variable. + +`$CARGO_HOME/bin/`:: + Binaries installed by man:cargo-install[1] will be located here. If using + rustup, executables distributed with Rust are also located here. + +`$CARGO_HOME/config`:: + The global configuration file. See linkcargo:reference/config.html[the reference] + for more information about configuration files. + +`.cargo/config`:: + Cargo automatically searches for a file named `.cargo/config` in the + current directory, and all parent directories. These configuration files + will be merged with the global configuration file. + +`$CARGO_HOME/credentials`:: + Private authentication information for logging in to a registry. + +`$CARGO_HOME/registry/`:: + This directory contains cached downloads of the registry index and any + downloaded dependencies. + +`$CARGO_HOME/git/`:: + This directory contains cached downloads of git dependencies. + +== EXAMPLES + +. Build a local package and all of its dependencies: + + cargo build + +. Build a package with optimizations: + + cargo build --release + +. Run tests for a cross-compiled target: + + cargo test --target i686-unknown-linux-gnu + +. Create a new package that builds an executable: + + cargo new foobar + +. Create a package in the current directory: + + mkdir foo && cd foo + cargo init . + +. Learn about a command's options and usage: + + cargo help clean + +== BUGS + +See https://github.com/rust-lang/cargo/issues for issues. + +== SEE ALSO +man:rustc[1], man:rustdoc[1] diff --git a/src/doc/man/description-install-root.adoc b/src/doc/man/description-install-root.adoc new file mode 100644 index 000000000..d7773d3b2 --- /dev/null +++ b/src/doc/man/description-install-root.adoc @@ -0,0 +1,7 @@ +The installation root is determined, in order of precedence: + +- `--root` option +- `CARGO_INSTALL_ROOT` environment variable +- `install.root` Cargo linkcargo:reference/config.html[config value] +- `CARGO_HOME` environment variable +- `$HOME/.cargo` diff --git a/src/doc/man/description-new-authors.adoc b/src/doc/man/description-new-authors.adoc new file mode 100644 index 000000000..0435295b7 --- /dev/null +++ b/src/doc/man/description-new-authors.adoc @@ -0,0 +1,24 @@ +The "authors" field in the manifest is determined from the environment or +configuration settings. 
A name is required and is determined from (first match +wins): + +- `cargo-new.name` Cargo config value +- `CARGO_NAME` environment variable +- `GIT_AUTHOR_NAME` environment variable +- `GIT_COMMITTER_NAME` environment variable +- `user.name` git configuration value +- `USER` environment variable +- `USERNAME` environment variable +- `NAME` environment variable + +The email address is optional and is determined from: + +- `cargo-new.email` Cargo config value +- `CARGO_EMAIL` environment variable +- `GIT_AUTHOR_EMAIL` environment variable +- `GIT_COMMITTER_EMAIL` environment variable +- `user.email` git configuration value +- `EMAIL` environment variable + +See linkcargo:reference/config.html[the reference] for more information about +configuration files. diff --git a/src/doc/man/description-one-target.adoc b/src/doc/man/description-one-target.adoc new file mode 100644 index 000000000..7af18131f --- /dev/null +++ b/src/doc/man/description-one-target.adoc @@ -0,0 +1,4 @@ +This command requires that only one target is being compiled when additional +arguments are provided. If more than one target is available for the current +package the filters of `--lib`, `--bin`, etc, must be used to select which +target is compiled. diff --git a/src/doc/man/generated/cargo-bench.html b/src/doc/man/generated/cargo-bench.html new file mode 100644 index 000000000..638dcf898 --- /dev/null +++ b/src/doc/man/generated/cargo-bench.html @@ -0,0 +1,466 @@ +

NAME

+
+

cargo-bench - Execute benchmarks of a package

+
+
+

SYNOPSIS

+
+
+

cargo bench [OPTIONS] [BENCHNAME] [-- BENCH-OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Compile and execute benchmarks.

+
+
+

The benchmark filtering argument BENCHNAME and all the arguments following +the two dashes (--) are passed to the benchmark binaries and thus to +libtest (rustc’s built in unit-test and micro-benchmarking framework). If +you’re passing arguments to both Cargo and the binary, the ones after -- go +to the binary, the ones before go to Cargo. For details about libtest’s +arguments see the output of cargo bench — --help. As an example, this will +run only the benchmark named foo (and skip other similarly named benchmarks +like foobar):

+
+
+
+
cargo bench -- foo --exact
+
+
+
+

Benchmarks are built with the --test option to rustc which creates an +executable with a main function that automatically runs all functions +annotated with the #[bench] attribute. Cargo passes the --bench flag to +the test harness to tell it to run only benchmarks.

+
+
+

The libtest harness may be disabled by setting harness = false in the target +manifest settings, in which case your code will need to provide its own main +function to handle running benchmarks.

+
+
+
+
+

OPTIONS

+
+
+

Benchmark Options

+
+
+
--no-run
+
+

Compile, but don’t run benchmarks.

+
+
--no-fail-fast
+
+

Run all benchmarks regardless of failure. Without this flag, Cargo will exit +after the first executable fails. The Rust test harness will run all +benchmarks within the executable to completion, this flag only applies to +the executable as a whole.

+
+
+
+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected +depend on the current working directory. In the root of a virtual workspace, +all workspace members are selected (--all is implied). Otherwise, only the +package in the current directory will be selected. The default packages may be +overridden with the workspace.default-members key in the root Cargo.toml +manifest.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Benchmark only the specified packages. See cargo-pkgid(1) for the +SPEC format. This flag may be specified multiple times.

+
+
--all
+
+

Benchmark all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the +--all flag. This flag may be specified multiple times.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo bench will build the +following targets of the selected packages:

+
+
+
    +
  • +

    lib — used to link with binaries and benchmarks

    +
  • +
  • +

    bins (only if benchmark targets are built and required features are +available)

    +
  • +
  • +

    lib as a benchmark

    +
  • +
  • +

    bins as benchmarks

    +
  • +
  • +

    benchmark targets

    +
  • +
+
+
+

The default behavior can be changed by setting the bench flag for the target +in the manifest settings. Setting examples to bench = true will build and +run the example as a benchmark. Setting targets to bench = false will stop +them from being benchmarked by default. Target selection options that take a +target by name ignore the bench flag and will always benchmark the given +target.

+
+
+

Passing target selection flags will benchmark only the +specified targets.

+
+
+
+
--lib
+
+

Benchmark the package’s library.

+
+
--bin NAME…​
+
+

Benchmark the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Benchmark all binary targets.

+
+
--example NAME…​
+
+

Benchmark the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Benchmark all example targets.

+
+
--test NAME…​
+
+

Benchmark the specified integration test. This flag may be specified multiple +times.

+
+
--tests
+
+

Benchmark all targets in test mode that have the test = true manifest +flag set. By default this includes the library and binaries built as +unittests, and integration tests. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +unittest, and once as a dependency for binaries, integration tests, etc.). +Targets may be enabled or disabled by setting the test flag in the +manifest settings for the target.

+
+
--bench NAME…​
+
+

Benchmark the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Benchmark all targets in benchmark mode that have the bench = true +manifest flag set. By default this includes the library and binaries built +as benchmarks, and bench targets. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +benchmark, and once as a dependency for binaries, benchmarks, etc.). +Targets may be enabled or disabled by setting the bench flag in the +manifest settings for the target.

+
+
--all-targets
+
+

Benchmark all targets. This is equivalent to specifying --lib --bins +--tests --benches --examples.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Benchmark for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+

By default the Rust test harness hides output from benchmark execution to keep +results readable. Benchmark output can be recovered (e.g., for debugging) by +passing --nocapture to the benchmark binaries:

+
+
+
+
cargo bench -- --nocapture
+
+
+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

+
+
    +
  • +

    auto (default): Automatically detect if color support is available on the +terminal.

    +
  • +
  • +

    always: Always display colors.

    +
  • +
  • +

    never: Never display colors.

    +
  • +
+
+
+

May also be specified with the term.color +config value.

+
+
+
--message-format FMT
+
+

The output format for diagnostic messages. Valid values:

+
+
    +
  • +

    human (default): Display in a human-readable text format.

    +
  • +
  • +

    json: Emit JSON messages to stdout.

    +
  • +
  • +

    short: Emit shorter, human-readable text messages.

    +
  • +
+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.
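For example, a CI step that fails instead of silently regenerating the lock file:

cargo bench --locked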

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+

The --jobs argument affects the building of the benchmark executable but does not affect how many threads are used when running the benchmarks. The Rust test harness runs benchmarks serially in a single thread.

+
+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +the reference +for more details.

+
+
+

Benchmarks are always built with the bench profile. Binary and lib targets are built separately as benchmarks with the bench profile. Library targets are built with the release profile when linked to binaries and benchmarks. Dependencies use the release profile.

+
+
+

If you need a debug build of a benchmark, try building it with cargo-build(1), which uses the test profile; that profile is unoptimized by default and includes debug information. You can then run the debug-enabled benchmark manually.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Build and execute all the benchmarks of the current package:

     cargo bench

  2. Run only a specific benchmark within a specific benchmark target:

     cargo bench --bench bench_name -- modname::some_benchmark
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-build.html b/src/doc/man/generated/cargo-build.html
new file mode 100644
index 000000000..8739f2931
--- /dev/null
+++ b/src/doc/man/generated/cargo-build.html
@@ -0,0 +1,425 @@

NAME

+
+

cargo-build - Compile the current package

+
+
+

SYNOPSIS

+
+
+

cargo build [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Compile local packages and all of their dependencies.

+
+
+
+
+

OPTIONS

+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected +depend on the current working directory. In the root of a virtual workspace, +all workspace members are selected (--all is implied). Otherwise, only the +package in the current directory will be selected. The default packages may be +overridden with the workspace.default-members key in the root Cargo.toml +manifest.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Build only the specified packages. See cargo-pkgid(1) for the +SPEC format. This flag may be specified multiple times.

+
+
--all
+
+

Build all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the +--all flag. This flag may be specified multiple times.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo build will build all +binary and library targets of the selected packages. Binaries are skipped if +they have required-features that are missing.

+
+
+

Passing target selection flags will build only the +specified targets.

+
+
+
+
--lib
+
+

Build the package’s library.

+
+
--bin NAME…​
+
+

Build the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Build all binary targets.

+
+
--example NAME…​
+
+

Build the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Build all example targets.

+
+
--test NAME…​
+
+

Build the specified integration test. This flag may be specified multiple +times.

+
+
--tests
+
+

Build all targets in test mode that have the test = true manifest +flag set. By default this includes the library and binaries built as +unittests, and integration tests. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +unittest, and once as a dependency for binaries, integration tests, etc.). +Targets may be enabled or disabled by setting the test flag in the +manifest settings for the target.

+
+
--bench NAME…​
+
+

Build the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Build all targets in benchmark mode that have the bench = true +manifest flag set. By default this includes the library and binaries built +as benchmarks, and bench targets. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +benchmark, and once as a dependency for binaries, benchmarks, etc.). +Targets may be enabled or disabled by setting the bench flag in the +manifest settings for the target.

+
+
--all-targets
+
+

Build all targets. This is equivalent to specifying --lib --bins +--tests --benches --examples.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Build for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--release
+
+

Build optimized artifacts with the release profile. See the +PROFILES section for details on how this affects profile selection.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
--out-dir DIRECTORY
+
+

Copy final artifacts to this directory.

+
+

This option is unstable and available only on the nightly channel and requires +the -Z unstable-options flag to enable.

+
+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

--message-format FMT

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.

  • json: Emit JSON messages to stdout.

  • short: Emit shorter, human-readable text messages.
+
+
+
--build-plan
+
+

Outputs a series of JSON messages to stdout that indicate the commands to run the build.

This option is unstable and available only on the nightly channel and requires the -Z unstable-options flag to enable.
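For example, on the nightly channel:

cargo build --build-plan -Z unstable-options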

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +the reference +for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the +dev or test profiles are used. If the --release flag is given, then the +release or bench profiles are used.

+
Target                                                 Default Profile   --release Profile
lib, bin, example                                      dev               release
test, bench, or any target in "test" or "bench" mode   test              bench

+
+

Dependencies use the dev/release profiles.
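Profile settings can be overridden in Cargo.toml; as a sketch, making release builds keep debug information:

[profile.release]
debug = true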

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Build the local package and all of its dependencies:

     cargo build

  2. Build with optimizations:

     cargo build --release
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-check.html b/src/doc/man/generated/cargo-check.html
new file mode 100644
index 000000000..8d98c14f8
--- /dev/null
+++ b/src/doc/man/generated/cargo-check.html
@@ -0,0 +1,420 @@

NAME

+
+

cargo-check - Check the current package

+
+
+

SYNOPSIS

+
+
+

cargo check [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Check a local package and all of its dependencies for errors. This will +essentially compile the packages without performing the final step of code +generation, which is faster than running cargo build. The compiler will save +metadata files to disk so that future runs will reuse them if the source has +not been modified.

+
+
+
+
+

OPTIONS

+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected +depend on the current working directory. In the root of a virtual workspace, +all workspace members are selected (--all is implied). Otherwise, only the +package in the current directory will be selected. The default packages may be +overridden with the workspace.default-members key in the root Cargo.toml +manifest.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Check only the specified packages. See cargo-pkgid(1) for the +SPEC format. This flag may be specified multiple times.

+
+
--all
+
+

Check all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the +--all flag. This flag may be specified multiple times.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo check will check all +binary and library targets of the selected packages. Binaries are skipped if +they have required-features that are missing.

+
+
+

Passing target selection flags will check only the +specified targets.

+
+
+
+
--lib
+
+

Check the package’s library.

+
+
--bin NAME…​
+
+

Check the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Check all binary targets.

+
+
--example NAME…​
+
+

Check the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Check all example targets.

+
+
--test NAME…​
+
+

Check the specified integration test. This flag may be specified multiple +times.

+
+
--tests
+
+

Check all targets in test mode that have the test = true manifest +flag set. By default this includes the library and binaries built as +unittests, and integration tests. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +unittest, and once as a dependency for binaries, integration tests, etc.). +Targets may be enabled or disabled by setting the test flag in the +manifest settings for the target.

+
+
--bench NAME…​
+
+

Check the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Check all targets in benchmark mode that have the bench = true +manifest flag set. By default this includes the library and binaries built +as benchmarks, and bench targets. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +benchmark, and once as a dependency for binaries, benchmarks, etc.). +Targets may be enabled or disabled by setting the bench flag in the +manifest settings for the target.

+
+
--all-targets
+
+

Check all targets. This is equivalent to specifying --lib --bins +--tests --benches --examples.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Check for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--release
+
+

Check optimized artifacts with the release profile. See the +PROFILES section for details on how this affects profile selection.

+
+
--profile NAME
+
+

Changes check behavior. Currently only test is +supported, which will check with the +#[cfg(test)] attribute enabled. This is useful to have it +check unit tests which are usually excluded via +the cfg attribute. This does not change the actual profile used.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

--message-format FMT

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.

  • json: Emit JSON messages to stdout.

  • short: Emit shorter, human-readable text messages.
+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +the reference +for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the +dev or test profiles are used. If the --release flag is given, then the +release or bench profiles are used.

+
Target                                                 Default Profile   --release Profile
lib, bin, example                                      dev               release
test, bench, or any target in "test" or "bench" mode   test              bench

+
+

Dependencies use the dev/release profiles.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Check the local package for errors:

     cargo check

  2. Check all targets, including unit tests:

     cargo check --all-targets --profile=test
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-clean.html b/src/doc/man/generated/cargo-clean.html
new file mode 100644
index 000000000..d5488dc4b
--- /dev/null
+++ b/src/doc/man/generated/cargo-clean.html
@@ -0,0 +1,224 @@

NAME

+
+

cargo-clean - Remove generated artifacts

+
+
+

SYNOPSIS

+
+
+

cargo clean [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Remove artifacts from the target directory that Cargo has generated in the +past.

+
+
+

With no options, cargo clean will delete the entire target directory.

+
+
+
+
+

OPTIONS

+
+
+

Package Selection

+
+

When no packages are selected, all packages and all dependencies in the +workspace are cleaned.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Clean only the specified packages. This flag may be specified +multiple times. See cargo-pkgid(1) for the SPEC format.

+
+
+
+
+
+

Clean Options

+
+
+
--doc
+
+

This option will cause cargo clean to remove only the doc directory in +the target directory.

+
+
--release
+
+

Clean all artifacts that were built with the release or bench +profiles.

+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
--target TRIPLE
+
+

Clean for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Remove the entire target directory:

     cargo clean

  2. Remove only the release artifacts:

     cargo clean --release
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-doc.html b/src/doc/man/generated/cargo-doc.html
new file mode 100644
index 000000000..a064ac2a6
--- /dev/null
+++ b/src/doc/man/generated/cargo-doc.html
@@ -0,0 +1,381 @@

NAME

+
+

cargo-doc - Build a package's documentation

+
+
+

SYNOPSIS

+
+
+

cargo doc [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Build the documentation for the local package and all dependencies. The output +is placed in target/doc in rustdoc’s usual format.

+
+
+
+
+

OPTIONS

+
+
+

Documentation Options

+
+
+
--open
+
+

Open the docs in a browser after building them.

+
+
--no-deps
+
+

Do not build documentation for dependencies.

+
+
--document-private-items
+
+

Include non-public items in the documentation.

+
+
+
+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected +depend on the current working directory. In the root of a virtual workspace, +all workspace members are selected (--all is implied). Otherwise, only the +package in the current directory will be selected. The default packages may be +overridden with the workspace.default-members key in the root Cargo.toml +manifest.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Document only the specified packages. See cargo-pkgid(1) for the +SPEC format. This flag may be specified multiple times.

+
+
--all
+
+

Document all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the +--all flag. This flag may be specified multiple times.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo doc will document all +binary and library targets of the selected package. The binary will be skipped +if its name is the same as the lib target. Binaries are skipped if they have +required-features that are missing.

+
+
+

The default behavior can be changed by setting doc = false for the target in the manifest settings. Using target selection options will ignore the doc flag and will always document the given target.
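A sketch of such a manifest entry (the binary name is hypothetical):

[[bin]]
name = "internal-tool"
doc = false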

+
+
+
+
--lib
+
+

Document the package’s library.

+
+
--bin NAME…​
+
+

Document the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Document all binary targets.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Document for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--release
+
+

Document optimized artifacts with the release profile. See the +PROFILES section for details on how this affects profile selection.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

--message-format FMT

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.

  • json: Emit JSON messages to stdout.

  • short: Emit shorter, human-readable text messages.
+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +the reference +for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the +dev or test profiles are used. If the --release flag is given, then the +release or bench profiles are used.

+
Target                                                 Default Profile   --release Profile
lib, bin, example                                      dev               release
test, bench, or any target in "test" or "bench" mode   test              bench

+
+

Dependencies use the dev/release profiles.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Build the local package documentation and its dependencies and output to target/doc:

     cargo doc
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-fetch.html b/src/doc/man/generated/cargo-fetch.html
new file mode 100644
index 000000000..371145989
--- /dev/null
+++ b/src/doc/man/generated/cargo-fetch.html
@@ -0,0 +1,188 @@

NAME

+
+

cargo-fetch - Fetch dependencies of a package from the network

+
+
+

SYNOPSIS

+
+
+

cargo fetch [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

If a Cargo.lock file is available, this command will ensure that all of the +git dependencies and/or registry dependencies are downloaded and locally +available. Subsequent Cargo commands never touch the network after a cargo +fetch unless the lock file changes.

+
+
+

If the lock file is not available, then this command will generate the lock +file before fetching the dependencies.

+
+
+

If --target is not specified, then all target dependencies are fetched.
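For example, to pre-fetch only the dependencies needed when cross-building for Windows (a standard Rust target triple):

cargo fetch --target x86_64-pc-windows-gnu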

+
+
+
+
+

OPTIONS

+
+
+

Fetch options

+
+
+
--target TRIPLE
+
+

Fetch for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Fetch all dependencies:

     cargo fetch
+
+
+
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-fix.html b/src/doc/man/generated/cargo-fix.html
new file mode 100644
index 000000000..6f5839e13
--- /dev/null
+++ b/src/doc/man/generated/cargo-fix.html
@@ -0,0 +1,499 @@

NAME

+
+

cargo-fix - Automatically fix lint warnings reported by rustc

+
+
+

SYNOPSIS

+
+
+

cargo fix [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This Cargo subcommand will automatically take rustc’s suggestions from diagnostics like warnings and apply them to your source code. This is intended to help automate tasks that rustc itself already knows how to tell you to fix! The cargo fix subcommand is also being developed for the Rust 2018 edition to give code an easy way to opt in to the new edition without having to worry about any breakage.

+
+
+

Executing cargo fix will under the hood execute cargo-check(1). Any warnings applicable to your crate will be automatically fixed (if possible) and all remaining warnings will be displayed when the check process is finished. For example, if you’d like to prepare for the 2018 edition, you can do so by executing:

cargo fix --edition

which behaves the same as cargo check --all-targets. Similarly, if you’d like to fix code for different platforms you can do:

cargo fix --edition --target x86_64-pc-windows-gnu

or if your crate has optional features:

cargo fix --edition --no-default-features --features foo

If you encounter any problems with cargo fix or otherwise have any questions or feature requests, please don’t hesitate to file an issue at https://github.com/rust-lang/cargo

+
+
+
+
+

OPTIONS

+
+
+

Fix options

+
+
+
--broken-code
+
+

Fix code even if it already has compiler errors. This is useful if cargo +fix fails to apply the changes. It will apply the changes and leave the +broken code in the working directory for you to inspect and manually fix.

+
+
--edition
+
+

Apply changes that will update the code to the latest edition. This will not update the edition in the Cargo.toml manifest, which must be updated manually.
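After the fixes apply cleanly, the manifest can be updated by hand; as a sketch (package name and version hypothetical):

[package]
name = "my-crate"
version = "0.1.0"
edition = "2018"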

+
+
--edition-idioms
+
+

Apply suggestions that will update code to the preferred style for the +current edition.

+
+
--allow-no-vcs
+
+

Fix code even if a VCS was not detected.

+
+
--allow-dirty
+
+

Fix code even if the working directory has changes.

+
+
--allow-staged
+
+

Fix code even if the working directory has staged changes.

+
+
+
+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected +depend on the current working directory. In the root of a virtual workspace, +all workspace members are selected (--all is implied). Otherwise, only the +package in the current directory will be selected. The default packages may be +overridden with the workspace.default-members key in the root Cargo.toml +manifest.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Fix only the specified packages. See cargo-pkgid(1) for the +SPEC format. This flag may be specified multiple times.

+
+
--all
+
+

Fix all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the +--all flag. This flag may be specified multiple times.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo fix will fix all targets +(--all-targets implied). Binaries are skipped if they have +required-features that are missing.

+
+
+

Passing target selection flags will fix only the +specified targets.

+
+
+
+
--lib
+
+

Fix the package’s library.

+
+
--bin NAME…​
+
+

Fix the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Fix all binary targets.

+
+
--example NAME…​
+
+

Fix the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Fix all example targets.

+
+
--test NAME…​
+
+

Fix the specified integration test. This flag may be specified multiple +times.

+
+
--tests
+
+

Fix all targets in test mode that have the test = true manifest +flag set. By default this includes the library and binaries built as +unittests, and integration tests. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +unittest, and once as a dependency for binaries, integration tests, etc.). +Targets may be enabled or disabled by setting the test flag in the +manifest settings for the target.

+
+
--bench NAME…​
+
+

Fix the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Fix all targets in benchmark mode that have the bench = true +manifest flag set. By default this includes the library and binaries built +as benchmarks, and bench targets. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +benchmark, and once as a dependency for binaries, benchmarks, etc.). +Targets may be enabled or disabled by setting the bench flag in the +manifest settings for the target.

+
+
--all-targets
+
+

Fix all targets. This is equivalent to specifying --lib --bins +--tests --benches --examples.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Fix for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--release
+
+

Fix optimized artifacts with the release profile. See the +PROFILES section for details on how this affects profile selection.

+
+
--profile NAME
+
+

Changes fix behavior. Currently only test is +supported, which will fix with the +#[cfg(test)] attribute enabled. This is useful to have it +fix unit tests which are usually excluded via +the cfg attribute. This does not change the actual profile used.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

--message-format FMT

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.

  • json: Emit JSON messages to stdout.

  • short: Emit shorter, human-readable text messages.
+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +the reference +for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the +dev or test profiles are used. If the --release flag is given, then the +release or bench profiles are used.

+
Target                                                 Default Profile   --release Profile
lib, bin, example                                      dev               release
test, bench, or any target in "test" or "bench" mode   test              bench

+
+

Dependencies use the dev/release profiles.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Apply compiler suggestions to the local package:

     cargo fix

  2. Convert a 2015 edition to 2018:

     cargo fix --edition

  3. Apply suggested idioms for the current edition:

     cargo fix --edition-idioms
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-generate-lockfile.html b/src/doc/man/generated/cargo-generate-lockfile.html
new file mode 100644
index 000000000..93c1ff56b
--- /dev/null
+++ b/src/doc/man/generated/cargo-generate-lockfile.html
@@ -0,0 +1,166 @@

NAME

+
+

cargo-generate-lockfile - Generate the lockfile for a package

+
+
+

SYNOPSIS

+
+
+

cargo generate-lockfile [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will create the Cargo.lock lockfile for the current package or +workspace. If the lockfile already exists, it will be rebuilt if there are any +manifest changes or dependency updates.

+
+
+

See also cargo-update(1) which is also capable of creating a Cargo.lock +lockfile and has more options for controlling update behavior.

+
+
+
+
+

OPTIONS

+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is +up-to-date. If the lock file is missing, or it needs to be updated, Cargo will +exit with an error. The --frozen flag also prevents Cargo from +attempting to access the network to determine if it is out-of-date.

+
+

These may be used in environments where you want to assert that the +Cargo.lock file is up-to-date (such as a CI build) or want to avoid network +access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Create or update the lockfile for the current package or workspace:

     cargo generate-lockfile
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-help.html b/src/doc/man/generated/cargo-help.html
new file mode 100644
index 000000000..29c367ccd
--- /dev/null
+++ b/src/doc/man/generated/cargo-help.html
@@ -0,0 +1,53 @@

NAME

+
+

cargo-help - Get help for a Cargo command

+
+
+

SYNOPSIS

+
+
+

cargo help [SUBCOMMAND]

+
+
+
+
+

DESCRIPTION

+
+
+

Prints a help message for the given command.

+
+
+
+
+

EXAMPLES

+
+
+
  1. Get help for a command:

     cargo help build

  2. Help is also available with the --help flag:

     cargo build --help
+
+
+
+
+

SEE ALSO

+
+ +
+
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-init.html b/src/doc/man/generated/cargo-init.html
new file mode 100644
index 000000000..979b330a4
--- /dev/null
+++ b/src/doc/man/generated/cargo-init.html
@@ -0,0 +1,255 @@

NAME

+
+

cargo-init - Create a new Cargo package in an existing directory

+
+
+

SYNOPSIS

+
+
+

cargo init [OPTIONS] [PATH]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will create a new Cargo manifest in the current directory. Give a +path as an argument to create in the given directory.

+
+
+

If there are typically-named Rust source files already in the directory, those +will be used. If not, then a sample src/main.rs file will be created, or +src/lib.rs if --lib is passed.

+
+
+

If the directory is not already in a VCS repository, then a new repository +is created (see --vcs below).

+
+
+

The "authors" field in the manifest is determined from the environment or +configuration settings. A name is required and is determined from (first match +wins):

+
+
+
  • cargo-new.name Cargo config value

  • CARGO_NAME environment variable

  • GIT_AUTHOR_NAME environment variable

  • GIT_COMMITTER_NAME environment variable

  • user.name git configuration value

  • USER environment variable

  • USERNAME environment variable

  • NAME environment variable

The email address is optional and is determined from:

+
+
+
  • cargo-new.email Cargo config value

  • CARGO_EMAIL environment variable

  • GIT_AUTHOR_EMAIL environment variable

  • GIT_COMMITTER_EMAIL environment variable

  • user.email git configuration value

  • EMAIL environment variable

See the reference for more information about configuration files.
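As a sketch, these values can be set in a Cargo config file (the name and email shown are placeholders):

[cargo-new]
name = "Your Name"
email = "you@example.com"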

+
+
+

See cargo-new(1) for a similar command which will create a new package in +a new directory.

+
+
+
+
+

OPTIONS

+
+
+

Init Options

+
+
+
--bin
+
+

Create a package with a binary target (src/main.rs). +This is the default behavior.

+
+
--lib
+
+

Create a package with a library target (src/lib.rs).

+
+
--edition EDITION
+
+

Specify the Rust edition to use. Default is 2018. +Possible values: 2015, 2018

+
+
--name NAME
+
+

Set the package name. Defaults to the directory name.

+
+
--vcs VCS
+
+

Initialize a new VCS repository for the given version control system (git, +hg, pijul, or fossil) or do not initialize any version control at all +(none). If not specified, defaults to git or the configuration value +cargo-new.vcs, or none if already inside a VCS repository.

+
+
--registry REGISTRY
+
+

This sets the publish field in Cargo.toml to the given registry name +which will restrict publishing only to that registry.

+
+

Registry names are defined in Cargo config files. +If not specified, the default registry defined by the registry.default +config key is used. If the default registry is not set and --registry is not +used, the publish field will not be set which means that publishing will not +be restricted.

+
+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Create a binary Cargo package in the current directory:

     cargo init
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-install.html b/src/doc/man/generated/cargo-install.html
new file mode 100644
index 000000000..bd5d6fb24
--- /dev/null
+++ b/src/doc/man/generated/cargo-install.html
@@ -0,0 +1,322 @@

NAME

+
+

cargo-install - Build and install a Rust binary

+
+
+

SYNOPSIS

+
+
+

cargo install [OPTIONS] CRATE…​
cargo install [OPTIONS] --path PATH
cargo install [OPTIONS] --git URL [CRATE…​]
cargo install [OPTIONS] --list

+
+
+
+
+

DESCRIPTION

+
+
+

This command manages Cargo’s local set of installed binary crates. Only packages +which have [[bin]] targets can be installed, and all binaries are installed into +the installation root’s bin folder.

+
+
+

The installation root is determined, in order of precedence:

+
+
+
  • --root option

  • CARGO_INSTALL_ROOT environment variable

  • install.root Cargo config value

  • CARGO_HOME environment variable

  • $HOME/.cargo

There are multiple sources from which a crate can be installed. The default +location is crates.io but the --git and --path flags can change this +source. If the source contains more than one package (such as crates.io or a +git repository with multiple crates) the CRATE argument is required to +indicate which crate should be installed.

+
+
+

Crates from crates.io can optionally specify the version they wish to install +via the --version flags, and similarly packages from git repositories can +optionally specify the branch, tag, or revision that should be installed. If a +crate has multiple binaries, the --bin argument can selectively install only +one of them, and if you’d rather install examples the --example argument can +be used as well.

+
+
+

If the source is crates.io or --git then by default the crate will be built in a temporary target directory. To avoid this, the target directory can be specified by setting the CARGO_TARGET_DIR environment variable to a relative path. In particular, this can be useful for caching build artifacts on continuous integration systems.
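For example, reusing a cache directory on CI (the directory name is hypothetical; ripgrep is just an example crate):

CARGO_TARGET_DIR=ci-cache cargo install ripgrep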

+
+
+
+
+

OPTIONS

+
+
+

Install Options

+
+
+
--vers VERSION
+
--version VERSION
+
+

Specify a version to install from crates.io.

+
+
--git URL
+
+

Git URL to install the specified crate from.

+
+
--branch BRANCH
+
+

Branch to use when installing from git.

+
+
--tag TAG
+
+

Tag to use when installing from git.

+
+
--rev SHA
+
+

Specific commit to use when installing from git.

+
+
--path PATH
+
+

Filesystem path to local crate to install.

+
+
--list
+
+

List all installed packages and their versions.

+
+
-f
+
--force
+
+

Force overwriting existing crates or binaries. This can be used to +reinstall or upgrade a crate.

+
+
--bin NAME…​
+
+

Install only the specified binary.

+
+
--bins
+
+

Install all binaries.

+
+
--example NAME…​
+
+

Install only the specified example.

+
+
--examples
+
+

Install all examples.

+
+
--root DIR
+
+

Directory to install packages into.

+
+
--registry REGISTRY
+
+

Name of the registry to use. Registry names are defined in Cargo config files. +If not specified, the default registry is used, which is defined by the +registry.default config key which defaults to crates-io.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Install for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--debug
+
+

Build with the dev profile instead of the release profile.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Install a package from crates.io:

     cargo install ripgrep

  2. Reinstall or upgrade a package:

     cargo install ripgrep --force
+
+
+
\ No newline at end of file
diff --git a/src/doc/man/generated/cargo-locate-project.html b/src/doc/man/generated/cargo-locate-project.html
new file mode 100644
index 000000000..42dfa876b
--- /dev/null
+++ b/src/doc/man/generated/cargo-locate-project.html
@@ -0,0 +1,152 @@

NAME

+
+

cargo-locate-project - Print a JSON representation of a Cargo.toml file's location

+
+
+

SYNOPSIS

+
+
+

cargo locate-project [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will print a JSON object to stdout with the full path to the Cargo.toml manifest.
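The output is a single JSON object of the form (path hypothetical):

{"root":"/path/to/my-package/Cargo.toml"}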

+
+
+

See also cargo-metadata(1) which is capable of returning the path to a +workspace root.

+
+
+
+
+

OPTIONS

+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Display the path to the manifest based on the current directory:

     cargo locate-project
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-login.html b/src/doc/man/generated/cargo-login.html new file mode 100644 index 000000000..b44377b34 --- /dev/null +++ b/src/doc/man/generated/cargo-login.html @@ -0,0 +1,160 @@ +

NAME

+
+

cargo-login - Save an API token from the registry locally

+
+
+

SYNOPSIS

+
+
+

cargo login [OPTIONS] [TOKEN]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will save the API token to disk so that commands that require authentication, such as cargo-publish(1), will be automatically authenticated. The token is saved in $CARGO_HOME/credentials. CARGO_HOME defaults to .cargo in your home directory.
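
As a hedged sketch, the stored file is a small TOML document; for the default crates.io registry it looks roughly like this (the token value here is fabricated for illustration):

    $ cat $CARGO_HOME/credentials
    [registry]
    token = "abcdefghijklmnop0123456789"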

+
+
+

If the TOKEN argument is not specified, it will be read from stdin.

+
+
+

The API token for crates.io may be retrieved from https://crates.io/me.

+
+
+

Take care to keep the token secret; it should not be shared with anyone else.

+
+
+
+
+

OPTIONS

+
+
+

Login Options

+
+
+
--registry REGISTRY
+
+

Name of the registry to use. Registry names are defined in Cargo config files. If not specified, the default registry is used, which is defined by the registry.default config key which defaults to crates-io.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Save the API token to disk:

     cargo login
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-metadata.html b/src/doc/man/generated/cargo-metadata.html new file mode 100644 index 000000000..271a8ea97 --- /dev/null +++ b/src/doc/man/generated/cargo-metadata.html @@ -0,0 +1,438 @@ +

NAME

+
+

cargo-metadata - Machine-readable metadata about the current package

+
+
+

SYNOPSIS

+
+
+

cargo metadata [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Output the resolved dependencies of a package (the concrete versions used, including overrides) in JSON to stdout.

+
+
+

It is recommended to include the --format-version flag to future-proof your code and ensure the output is in the format you are expecting.

+
+
+

See the cargo_metadata crate for a Rust API for reading the metadata.
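
For example, a hedged sketch of consuming the output from a shell script (this assumes the third-party jq tool is installed; workspace_root is a field shown in the output format below):

    cargo metadata --format-version=1 --no-deps | jq -r '.workspace_root'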

+
+
+
+
+

OUTPUT FORMAT

+
+
+

The output has the following format:

+
+
+
+
{
+    /* Array of all packages in the workspace.
+       It also includes all feature-enabled dependencies unless --no-deps is used.
+    */
+    "packages": [
+        {
+            /* The name of the package. */
+            "name": "my-package",
+            /* The version of the package. */
+            "version": "0.1.0",
+            /* The Package ID, a unique identifier for referring to the package. */
+            "id": "my-package 0.1.0 (path+file:///path/to/my-package)",
+            /* The license value from the manifest, or null. */
+            "license": "MIT/Apache-2.0",
+            /* The license-file value from the manifest, or null. */
+            "license_file": "LICENSE",
+            /* The description value from the manifest, or null. */
+            "description": "Package description.",
+            /* The source ID of the package. This represents where
+               a package is retrieved from.
+               This is null for path dependencies and workspace members.
+               For other dependencies, it is a string with the format:
+               - "registry+URL" for registry-based dependencies.
+                 Example: "registry+https://github.com/rust-lang/crates.io-index"
+               - "git+URL" for git-based dependencies.
+                 Example: "git+https://github.com/rust-lang/cargo?rev=5e85ba14aaa20f8133863373404cb0af69eeef2c#5e85ba14aaa20f8133863373404cb0af69eeef2c"
+            */
+            "source": null,
+            /* Array of dependencies declared in the package's manifest. */
+            "dependencies": [
+                {
+                    /* The name of the dependency. */
+                    "name": "bitflags",
+                    /* The source ID of the dependency. May be null, see
+                       description for the package source.
+                    */
+                    "source": "registry+https://github.com/rust-lang/crates.io-index",
+                    /* The version requirement for the dependency.
+                       Dependencies without a version requirement have a value of "*".
+                    */
+                    "req": "^1.0",
+                    /* The dependency kind.
+                       "dev", "build", or null for a normal dependency.
+                    */
+                    "kind": null,
+                    /* If the dependency is renamed, this is the new name for
+                       the dependency as a string.  null if it is not renamed.
+                    */
+                    "rename": null,
+                    /* Boolean of whether or not this is an optional dependency. */
+                    "optional": false,
+                    /* Boolean of whether or not default features are enabled. */
+                    "uses_default_features": true,
+                    /* Array of features enabled. */
+                    "features": [],
+                    /* The target platform for the dependency.
+                       null if not a target dependency.
+                    */
+                    "target": "cfg(windows)",
+                    /* A string of the URL of the registry this dependency is from.
+                       If not specified or null, the dependency is from the default
+                       registry (crates.io).
+                    */
+                    "registry": null
+                }
+            ],
+            /* Array of Cargo targets. */
+            "targets": [
+                {
+                    /* Array of target kinds.
+                       - lib targets list the `crate-type` values from the
+                         manifest such as "lib", "rlib", "dylib",
+                         "proc-macro", etc. (default ["lib"])
+                       - binary is ["bin"]
+                       - example is ["example"]
+                       - integration test is ["test"]
+                       - benchmark is ["bench"]
+                       - build script is ["custom-build"]
+                    */
+                    "kind": [
+                        "bin"
+                    ],
+                    /* Array of crate types.
+                       - lib and example libraries list the `crate-type` values
+                         from the manifest such as "lib", "rlib", "dylib",
+                         "proc-macro", etc. (default ["lib"])
+                       - all other target kinds are ["bin"]
+                    */
+                    "crate_types": [
+                        "bin"
+                    ],
+                    /* The name of the target. */
+                    "name": "my-package",
+                    /* Absolute path to the root source file of the target. */
+                    "src_path": "/path/to/my-package/src/main.rs",
+                    /* The Rust edition of the target.
+                       Defaults to the package edition.
+                    */
+                    "edition": "2018",
+                    /* Array of required features.
+                       This property is not included if no required features are set.
+                    */
+                    "required-features": ["feat1"]
+                }
+            ],
+            /* Set of features defined for the package.
+               Each feature maps to an array of features or dependencies it
+               enables.
+            */
+            "features": {
+                "default": [
+                    "feat1"
+                ],
+                "feat1": [],
+                "feat2": []
+            },
+            /* Absolute path to this package's manifest. */
+            "manifest_path": "/path/to/my-package/Cargo.toml",
+            /* Package metadata.
+               This is null if no metadata is specified.
+            */
+            "metadata": {
+                "docs": {
+                    "rs": {
+                        "all-features": true
+                    }
+                }
+            },
+            /* Array of authors from the manifest.
+               Empty array if no authors specified.
+            */
+            "authors": [
+                "Jane Doe <user@example.com>"
+            ],
+            /* Array of categories from the manifest. */
+            "categories": [
+                "command-line-utilities"
+            ],
+            /* Array of keywords from the manifest. */
+            "keywords": [
+                "cli"
+            ],
+            /* The readme value from the manifest or null if not specified. */
+            "readme": "README.md",
+            /* The repository value from the manifest or null if not specified. */
+            "repository": "https://github.com/rust-lang/cargo",
+            /* The default edition of the package.
+               Note that individual targets may have different editions.
+            */
+            "edition": "2018",
+            /* Optional string that is the name of a native library the package
+               is linking to.
+            */
+            "links": null
+        }
+    ],
+    /* Array of members of the workspace.
+       Each entry is the Package ID for the package.
+    */
+    "workspace_members": [
+        "my-package 0.1.0 (path+file:///path/to/my-package)"
+    ],
+    /* The resolved dependency graph, with the concrete versions and features
+       selected. The set depends on the enabled features.
+       This is null if --no-deps is specified.
+    */
+    "resolve": {
+        /* Array of nodes within the dependency graph.
+           Each node is a package.
+        */
+        "nodes": [
+            {
+                /* The Package ID of this node. */
+                "id": "my-package 0.1.0 (path+file:///path/to/my-package)",
+                /* The dependencies of this package, an array of Package IDs. */
+                "dependencies": [
+                    "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)"
+                ],
+                /* The dependencies of this package. This is an alternative to
+                   "dependencies" which contains additional information. In
+                   particular, this handles renamed dependencies.
+                */
+                "deps": [
+                    {
+                        /* The name of the dependency.
+                           If this is a renamed dependency, this is the new
+                           name.
+                        */
+                        "name": "bitflags",
+                        /* The Package ID of the dependency. */
+                        "pkg": "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)"
+                    }
+                ],
+                /* Array of features enabled on this package. */
+                "features": [
+                    "default"
+                ]
+            }
+        ],
+        /* The root package of the workspace.
+           This is null if this is a virtual workspace. Otherwise it is
+           the Package ID of the root package.
+        */
+        "root": "my-package 0.1.0 (path+file:///path/to/my-package)"
+    },
+    /* The absolute path to the build directory where Cargo places its output. */
+    "target_directory": "/path/to/my-package/target",
+    /* The version of the schema for this metadata structure.
+       This will be changed if incompatible changes are ever made.
+    */
+    "version": 1,
+    /* The absolute path to the root of the workspace. */
+    "workspace_root": "/path/to/my-package"
+}
+
+
+
+
+
+

OPTIONS

+
+
+

Output Options

+
+
+
--no-deps
+
+

Output information only about the workspace members and don’t fetch dependencies.

+
+
--format-version VERSION
+
+

Specify the version of the output format to use. Currently 1 is the only +possible value.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Output JSON about the current package:

     cargo metadata --format-version=1
+
+
+
+
+

SEE ALSO

+
+ +
+
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-new.html b/src/doc/man/generated/cargo-new.html new file mode 100644 index 000000000..adf6c46d7 --- /dev/null +++ b/src/doc/man/generated/cargo-new.html @@ -0,0 +1,248 @@ +

NAME

+
+

cargo-new - Create a new Cargo package

+
+
+

SYNOPSIS

+
+
+

cargo new [OPTIONS] PATH

+
+
+
+
+

DESCRIPTION

+
+
+

This command will create a new Cargo package in the given directory. This includes a simple template with a Cargo.toml manifest, sample source file, and a VCS ignore file. If the directory is not already in a VCS repository, then a new repository is created (see --vcs below).

+
+
+

The "authors" field in the manifest is determined from the environment or configuration settings. A name is required and is determined from (first match wins; see the example following these lists):

+
+
+
  • cargo-new.name Cargo config value

  • CARGO_NAME environment variable

  • GIT_AUTHOR_NAME environment variable

  • GIT_COMMITTER_NAME environment variable

  • user.name git configuration value

  • USER environment variable

  • USERNAME environment variable

  • NAME environment variable

The email address is optional and is determined from:

+
+
+
  • cargo-new.email Cargo config value

  • CARGO_EMAIL environment variable

  • GIT_AUTHOR_EMAIL environment variable

  • GIT_COMMITTER_EMAIL environment variable

  • user.email git configuration value

  • EMAIL environment variable

See the reference for more information about configuration files.
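
As a hedged illustration of the precedence lists above (the name and email values are hypothetical), the environment variables can be set for a single invocation:

    CARGO_NAME="Jane Doe" CARGO_EMAIL="user@example.com" cargo new foo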

+
+
+

See cargo-init(1) for a similar command which will create a new manifest in an existing directory.

+
+
+
+
+

OPTIONS

+
+
+

New Options

+
+
+
--bin
+
+

Create a package with a binary target (src/main.rs). +This is the default behavior.

+
+
--lib
+
+

Create a package with a library target (src/lib.rs).

+
+
--edition EDITION
+
+

Specify the Rust edition to use. Default is 2018. Possible values: 2015, 2018

+
+
--name NAME
+
+

Set the package name. Defaults to the directory name.

+
+
--vcs VCS
+
+

Initialize a new VCS repository for the given version control system (git, hg, pijul, or fossil) or do not initialize any version control at all (none). If not specified, defaults to git or the configuration value cargo-new.vcs, or none if already inside a VCS repository.

+
+
--registry REGISTRY
+
+

This sets the publish field in Cargo.toml to the given registry name, which will restrict publishing only to that registry.

Registry names are defined in Cargo config files. If not specified, the default registry defined by the registry.default config key is used. If the default registry is not set and --registry is not used, the publish field will not be set, which means that publishing will not be restricted.

+
+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Create a binary Cargo package in the given directory:

     cargo new foo
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-owner.html b/src/doc/man/generated/cargo-owner.html new file mode 100644 index 000000000..21eab2e8d --- /dev/null +++ b/src/doc/man/generated/cargo-owner.html @@ -0,0 +1,212 @@ +

NAME

+
+

cargo-owner - Manage the owners of a crate on the registry

+
+
+

SYNOPSIS

+
+
+

cargo owner [OPTIONS] --add LOGIN [CRATE]
cargo owner [OPTIONS] --remove LOGIN [CRATE]
cargo owner [OPTIONS] --list [CRATE]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will modify the owners for a crate on the registry. Owners of a crate can upload new versions and yank old versions. Non-team owners can also modify the set of owners, so take care!

+
+
+

This command requires you to be authenticated with either the --token option or by using cargo-login(1).

+
+
+

If the crate name is not specified, it will use the package name from the +current directory.

+
+
+

See the reference for more +information about owners and publishing.

+
+
+
+
+

OPTIONS

+
+
+

Owner Options

+
+
+
-a
+
--add LOGIN…​
+
+

Invite the given user or team as an owner.

+
+
-r
+
--remove LOGIN…​
+
+

Remove the given user or team as an owner.

+
+
-l
+
--list
+
+

List owners of a crate.

+
+
--token TOKEN
+
+

API token to use when authenticating. This overrides the token stored in the credentials file (which is created by cargo-login(1)).

Cargo config environment variables can be used to override the tokens stored in the credentials file. The token for crates.io may be specified with the CARGO_REGISTRY_TOKEN environment variable. Tokens for other registries may be specified with environment variables of the form CARGO_REGISTRIES_NAME_TOKEN where NAME is the name of the registry in all capital letters.
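
A hedged sketch of supplying a token for a custom registry this way (the registry name myregistry and the token value are hypothetical):

    CARGO_REGISTRIES_MYREGISTRY_TOKEN="abc123" cargo owner --add username --registry myregistry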

+
+
+
--index INDEX
+
+

The URL of the registry index to use.

+
+
--registry REGISTRY
+
+

Name of the registry to use. Registry names are defined in Cargo config files. If not specified, the default registry is used, which is defined by the registry.default config key which defaults to crates-io.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. List owners of a package:

     cargo owner --list foo

  2. Invite an owner to a package:

     cargo owner --add username foo

  3. Remove an owner from a package:

     cargo owner --remove username foo
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-package.html b/src/doc/man/generated/cargo-package.html new file mode 100644 index 000000000..268789ce4 --- /dev/null +++ b/src/doc/man/generated/cargo-package.html @@ -0,0 +1,301 @@ +

NAME

+
+

cargo-package - Assemble the local package into a distributable tarball

+
+
+

SYNOPSIS

+
+
+

cargo package [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will create a distributable, compressed .crate file with the source code of the package in the current directory. The resulting file will be stored in the target/package directory. This performs the following steps:

+
+
+
  1. Load and check the current workspace, performing some basic checks.

     • Path dependencies are not allowed unless they have a version key. Cargo will ignore the path key for dependencies in published packages.

  2. Create the compressed .crate file.

     • The original Cargo.toml file is rewritten and normalized.

     • [patch], [replace], and [workspace] sections are removed from the manifest.

     • A .cargo_vcs_info.json file is included that contains information about the current VCS checkout hash if available (not included with --allow-dirty).

  3. Extract the .crate file and build it to verify it can build.

  4. Check that build scripts did not modify any source files.
+
+
+

The list of files included can be controlled with the include and exclude fields in the manifest.
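
Because the include and exclude rules are easy to get wrong, the --list option described below can be used to preview exactly which files would be packaged without creating the tarball:

    cargo package --list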

+
+
+

See the reference for more details about +packaging and publishing.

+
+
+
+
+

OPTIONS

+
+
+

Package Options

+
+
+
-l
+
--list
+
+

Print files included in a package without making one.

+
+
--no-verify
+
+

Don’t verify the contents by building them.

+
+
--no-metadata
+
+

Ignore warnings about a lack of human-usable metadata (such as the description or the license).

+
+
--allow-dirty
+
+

Allow working directories with uncommitted VCS changes to be packaged.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Package for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Create a compressed .crate file of the current package:

     cargo package
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-pkgid.html b/src/doc/man/generated/cargo-pkgid.html new file mode 100644 index 000000000..01984370e --- /dev/null +++ b/src/doc/man/generated/cargo-pkgid.html @@ -0,0 +1,241 @@ +

NAME

+
+

cargo-pkgid - Print a fully qualified package specification

+
+
+

SYNOPSIS

+
+
+

cargo pkgid [OPTIONS] [SPEC]

+
+
+
+
+

DESCRIPTION

+
+
+

Given a SPEC argument, print out the fully qualified package ID specifier for a package or dependency in the current workspace. This command will generate an error if SPEC is ambiguous as to which package it refers to in the dependency graph. If no SPEC is given, then the specifier for the local package is printed.

+
+
+

This command requires that a lockfile is available and dependencies have been fetched.
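
As a hedged sketch, one way to satisfy both requirements is to fetch first (the package name bitflags mirrors the examples below):

    cargo fetch
    cargo pkgid bitflags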

+
+
+

A package specifier consists of a name, version, and source URL. You are allowed to use partial specifiers to succinctly match a specific package as long as it matches only one package. The format of a SPEC can be one of the following:

+
Table 1. SPEC Query Format

SPEC Structure      Example SPEC
NAME                bitflags
NAME:VERSION        bitflags:1.0.4
URL                 https://github.com/rust-lang/cargo
URL#VERSION         https://github.com/rust-lang/cargo#0.33.0
URL#NAME            https://github.com/rust-lang/crates.io-index#bitflags
URL#NAME:VERSION    https://github.com/rust-lang/cargo#crates-io:0.21.0

OPTIONS

+
+
+

Package Selection

+
+
+
-p SPEC
+
--package SPEC
+
+

Get the package ID for the given package instead of the current package.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Retrieve package specification for foo package:

     cargo pkgid foo

  2. Retrieve package specification for version 1.0.0 of foo:

     cargo pkgid foo:1.0.0

  3. Retrieve package specification for foo from crates.io:

     cargo pkgid https://github.com/rust-lang/crates.io-index#foo
+
+
+
+ \ No newline at end of file diff --git a/src/doc/man/generated/cargo-publish.html b/src/doc/man/generated/cargo-publish.html new file mode 100644 index 000000000..e13b4a17c --- /dev/null +++ b/src/doc/man/generated/cargo-publish.html @@ -0,0 +1,300 @@ +

NAME

+
+

cargo-publish - Upload a package to the registry

+
+
+

SYNOPSIS

+
+
+

cargo publish [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will create a distributable, compressed .crate file with the source code of the package in the current directory and upload it to a registry. The default registry is https://crates.io. This performs the following steps:

+
+
+
  1. Performs a few checks, including:

     • Checks the package.publish key in the manifest for restrictions on which registries you are allowed to publish to.

  2. Create a .crate file by following the steps in cargo-package(1).

  3. Upload the crate to the registry. Note that the server will perform additional checks on the crate.
+
+
+

This command requires you to be authenticated with either the --token option or by using cargo-login(1).

+
+
+

See the reference for more details about +packaging and publishing.

+
+
+
+
+

OPTIONS

+
+
+

Publish Options

+
+
+
--dry-run
+
+

Perform all checks without uploading.

+
+
--token TOKEN
+
+

API token to use when authenticating. This overrides the token stored in the credentials file (which is created by cargo-login(1)).

Cargo config environment variables can be used to override the tokens stored in the credentials file. The token for crates.io may be specified with the CARGO_REGISTRY_TOKEN environment variable. Tokens for other registries may be specified with environment variables of the form CARGO_REGISTRIES_NAME_TOKEN where NAME is the name of the registry in all capital letters.
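
A hedged example combining these options to validate a release without uploading (the token value is fabricated):

    cargo publish --dry-run --token abc123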

+
+
+
--no-verify
+
+

Don’t verify the contents by building them.

+
+
--allow-dirty
+
+

Allow working directories with uncommitted VCS changes to be packaged.

+
+
--index INDEX
+
+

The URL of the registry index to use.

+
+
--registry REGISTRY
+
+

Name of the registry to use. Registry names are defined in Cargo config files. +If not specified, the default registry is used, which is defined by the +registry.default config key which defaults to crates-io.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Publish for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.


ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Publish the current package:

     cargo publish
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-run.html b/src/doc/man/generated/cargo-run.html new file mode 100644 index 000000000..b102bfbba --- /dev/null +++ b/src/doc/man/generated/cargo-run.html @@ -0,0 +1,350 @@ +

NAME

+
+

cargo-run - Run the current package

+
+
+

SYNOPSIS

+
+
+

cargo run [OPTIONS] [-- ARGS]

+
+
+
+
+

DESCRIPTION

+
+
+

Run a binary or example of the local package.

+
+
+

All the arguments following the two dashes (--) are passed to the binary to run. If you’re passing arguments to both Cargo and the binary, the ones after -- go to the binary; the ones before go to Cargo.
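
For example (the --verbose flag after -- is only an illustration of an argument the binary itself might accept):

    cargo run --release -- --verbose

Here --release is consumed by Cargo, while --verbose is passed through to the compiled binary.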

+
+
+
+
+

OPTIONS

+
+
+

Package Selection

+
+

By default, the package in the current working directory is selected. The -p flag can be used to choose a different package in a workspace.

+
+
+
+
-p SPEC
+
--package SPEC
+
+

The package to run. See cargo-pkgid(1) for +the SPEC format.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo run will run the binary target. If there are multiple binary targets, you must pass a target flag to choose one.

+
+
+
+
--bin NAME
+
+

Run the specified binary.

+
+
--example NAME
+
+

Run the specified example.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Run for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--release
+
+

Run optimized artifacts with the release profile. See the +PROFILES section for details on how this affects profile selection.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
--message-format FMT
+
+

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.

  • json: Emit JSON messages to stdout.

  • short: Emit shorter, human-readable text messages.

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.


PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels and debug settings. See the reference for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the dev or test profiles are used. If the --release flag is given, then the release or bench profiles are used.

+
Target                                                  Default Profile    --release Profile
lib, bin, example                                       dev                release
test, bench, or any target in "test" or "bench" mode    test               bench

Dependencies use the dev/release profiles.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Build the local package and run its main target (assuming only one binary):

     cargo run

  2. Run an example with extra arguments:

     cargo run --example exname -- --exoption exarg1 exarg2
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-rustc.html b/src/doc/man/generated/cargo-rustc.html new file mode 100644 index 000000000..1a7a0fceb --- /dev/null +++ b/src/doc/man/generated/cargo-rustc.html @@ -0,0 +1,413 @@ +

NAME

+
+

cargo-rustc - Compile the current package, and pass extra options to the compiler

+
+
+

SYNOPSIS

+
+
+

cargo rustc [OPTIONS] [-- ARGS]

+
+
+
+
+

DESCRIPTION

+
+
+

The specified target for the current package (or package specified by -p if provided) will be compiled along with all of its dependencies. The specified ARGS will all be passed to the final compiler invocation, not any of the dependencies. Note that the compiler will still unconditionally receive arguments such as -L, --extern, and --crate-type, and the specified ARGS will simply be added to the compiler invocation.

+
+
+

See https://doc.rust-lang.org/rustc/index.html for documentation on rustc +flags.

+
+
+

This command requires that only one target is being compiled when additional arguments are provided. If more than one target is available for the current package the filters of --lib, --bin, etc, must be used to select which target is compiled.

To pass flags to all compiler processes spawned by Cargo, use the RUSTFLAGS environment variable or the build.rustflags config value.
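
As a hedged illustration of the difference (the deny-unsafe-code lint mirrors the example at the end of this page): the first command passes the flag only to the final compiler invocation for the selected target, while the second passes it to every compiler process:

    cargo rustc --lib -- -D unsafe-code
    RUSTFLAGS="-D unsafe-code" cargo build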

+
+
+
+
+

OPTIONS

+
+
+

Package Selection

+
+

By default, the package in the current working directory is selected. The -p +flag can be used to choose a different package in a workspace.

+
+
+
+
-p SPEC
+
--package SPEC
+
+

The package to build. See cargo-pkgid(1) for +the SPEC format.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo rustc will build all +binary and library targets of the selected package.

+
+
+

Passing target selection flags will build only the +specified targets.

+
+
+
+
--lib
+
+

Build the package’s library.

+
+
--bin NAME…​
+
+

Build the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Build all binary targets.

+
+
--example NAME…​
+
+

Build the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Build all example targets.

+
+
--test NAME…​
+
+

Build the specified integration test. This flag may be specified multiple +times.

+
+
--tests
+
+

Build all targets in test mode that have the test = true manifest flag set. By default this includes the library and binaries built as unittests, and integration tests. Be aware that this will also build any required dependencies, so the lib target may be built twice (once as a unittest, and once as a dependency for binaries, integration tests, etc.). Targets may be enabled or disabled by setting the test flag in the manifest settings for the target.

+
+
--bench NAME…​
+
+

Build the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Build all targets in benchmark mode that have the bench = true manifest flag set. By default this includes the library and binaries built as benchmarks, and bench targets. Be aware that this will also build any required dependencies, so the lib target may be built twice (once as a benchmark, and once as a dependency for binaries, benchmarks, etc.). Targets may be enabled or disabled by setting the bench flag in the manifest settings for the target.

+
+
--all-targets
+
+

Build all targets. This is equivalent to specifying --lib --bins --tests --benches --examples.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only +apply to the current directory’s package. Features of direct dependencies +may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s +package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Build for the given architecture. The default is the host +architecture. The general format of the triple is +<arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a +list of supported targets.

+
+

This may also be specified with the build.target +config value.

+
+
+
--release
+
+

Build optimized artifacts with the release profile. See the +PROFILES section for details on how this affects profile selection.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be +specified with the CARGO_TARGET_DIR environment variable, or the +build.target-dir config value. Defaults +to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which +includes extra output such as dependency warnings and build script output. +May also be specified with the term.verbose +config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.

  • always: Always display colors.

  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
--message-format FMT
+
+

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.

  • json: Emit JSON messages to stdout.

  • short: Emit shorter, human-readable text messages.

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current +directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for +details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the +build.jobs config value. Defaults to +the number of CPUs.


PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels +and debug settings. See +the reference +for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the +dev or test profiles are used. If the --release flag is given, then the +release or bench profiles are used.

+
Target                                                  Default Profile    --release Profile
lib, bin, example                                       dev                release
test, bench, or any target in "test" or "bench" mode    test               bench

Dependencies use the dev/release profiles.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for +details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Check if your package (not including dependencies) uses unsafe code:

     cargo rustc --lib -- -D unsafe-code

  2. Try an experimental flag on the nightly compiler, such as this which prints the size of every type:

     cargo rustc --lib -- -Z print-type-sizes
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-rustdoc.html b/src/doc/man/generated/cargo-rustdoc.html new file mode 100644 index 000000000..22883b32f --- /dev/null +++ b/src/doc/man/generated/cargo-rustdoc.html @@ -0,0 +1,417 @@ +

NAME

+
+

cargo-rustdoc - Build a package's documentation, using specified custom flags

+
+
+

SYNOPSIS

+
+
+

cargo rustdoc [OPTIONS] [-- ARGS]

+
+
+
+
+

DESCRIPTION

+
+
+

The specified target for the current package (or package specified by -p if provided) will be documented with the specified ARGS being passed to the final rustdoc invocation. Dependencies will not be documented as part of this command. Note that rustdoc will still unconditionally receive arguments such as -L, --extern, and --crate-type, and the specified ARGS will simply be added to the rustdoc invocation.

+
+
+

See https://doc.rust-lang.org/rustdoc/index.html for documentation on rustdoc +flags.

+
+
+

This command requires that only one target is being compiled when additional arguments are provided. If more than one target is available for the current package the filters of --lib, --bin, etc, must be used to select which target is compiled.

To pass flags to all rustdoc processes spawned by Cargo, use the RUSTDOCFLAGS environment variable or the build.rustdocflags configuration option.
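
Analogously, a hedged sketch (the --document-private-items flag is an illustrative rustdoc option, not specific to this page):

    cargo rustdoc -- --document-private-items
    RUSTDOCFLAGS="--document-private-items" cargo doc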

+
+
+
+
+

OPTIONS

+
+
+

Documentation Options

+
+
+
--open
+
+

Open the docs in a browser after building them.

+
+
+
+
+
+

Package Selection

+
+

By default, the package in the current working directory is selected. The -p +flag can be used to choose a different package in a workspace.

+
+
+
+
-p SPEC
+
--package SPEC
+
+

The package to document. See cargo-pkgid(1) for +the SPEC format.

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo rustdoc will document all binary and library targets of the selected package. The binary will be skipped if its name is the same as the lib target. Binaries are skipped if they have required-features that are missing.

+
+
+

Passing target selection flags will document only the +specified targets.

+
+
+
+
--lib
+
+

Document the package’s library.

+
+
--bin NAME…​
+
+

Document the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Document all binary targets.

+
+
--example NAME…​
+
+

Document the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Document all example targets.

+
+
--test NAME…​
+
+

Document the specified integration test. This flag may be specified multiple +times.

+
+
--tests
+
+

Document all targets in test mode that have the test = true manifest +flag set. By default this includes the library and binaries built as +unittests, and integration tests. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +unittest, and once as a dependency for binaries, integration tests, etc.). +Targets may be enabled or disabled by setting the test flag in the +manifest settings for the target.

+
+
--bench NAME…​
+
+

Document the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Document all targets in benchmark mode that have the bench = true +manifest flag set. By default this includes the library and binaries built +as benchmarks, and bench targets. Be aware that this will also build any +required dependencies, so the lib target may be built twice (once as a +benchmark, and once as a dependency for binaries, benchmarks, etc.). +Targets may be enabled or disabled by setting the bench flag in the +manifest settings for the target.

+
+
--all-targets
+
+

Document all targets. This is equivalent to specifying --lib --bins +--tests --benches --examples.

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for +every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only apply to the current directory’s package. Features of direct dependencies may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Document for the given architecture. The default is the host architecture. The general format of the triple is <arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a list of supported targets.

+
+

This may also be specified with the build.target config value.
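As an illustrative sketch, documenting for a non-host architecture (assuming the corresponding target is installed, e.g. via rustup):

    cargo rustdoc --target x86_64-pc-windows-gnu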

+
+
+
--release
+
+

Document optimized artifacts with the release profile. See the PROFILES section for details on how this affects profile selection.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be specified with the CARGO_TARGET_DIR environment variable, or the build.target-dir config value. Defaults to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
--message-format FMT
+
+

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.
  • json: Emit JSON messages to stdout.
  • short: Emit shorter, human-readable text messages.
+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+

Miscellaneous Options

+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the build.jobs config value. Defaults to the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels and debug settings. See the reference for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the dev or test profiles are used. If the --release flag is given, then the release or bench profiles are used.

+
Target                                                 Default Profile   --release Profile
lib, bin, example                                      dev               release
test, bench, or any target in "test" or "bench" mode   test              bench

+
+

Dependencies use the dev/release profiles.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Build documentation with custom CSS included from a given file:

     cargo rustdoc --lib -- --extend-css extra.css
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-search.html b/src/doc/man/generated/cargo-search.html new file mode 100644 index 000000000..a2121216f --- /dev/null +++ b/src/doc/man/generated/cargo-search.html @@ -0,0 +1,158 @@ +

NAME

+
+

cargo-search - Search packages in crates.io

+
+
+

SYNOPSIS

+
+
+

cargo search [OPTIONS] [QUERY…​]

+
+
+
+
+

DESCRIPTION

+
+
+

This performs a textual search for crates on https://crates.io. The matching crates will be displayed along with their description in TOML format suitable for copying into a Cargo.toml manifest.

+
+
+
+
+

OPTIONS

+
+
+

Search Options

+
+
+
--limit LIMIT
+
+

Limit the number of results (default: 10, max: 100).
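For instance, a quick sketch that caps the result list at three entries (the query string is illustrative):

    cargo search regex --limit 3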

+
+
--index INDEX
+
+

The URL of the registry index to use.

+
+
--registry REGISTRY
+
+

Name of the registry to use. Registry names are defined in Cargo config files. If not specified, the default registry is used, which is defined by the registry.default config key which defaults to crates-io.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Search for a package from crates.io:

     cargo search serde
+
+
+
+ \ No newline at end of file diff --git a/src/doc/man/generated/cargo-test.html b/src/doc/man/generated/cargo-test.html new file mode 100644 index 000000000..d193042d8 --- /dev/null +++ b/src/doc/man/generated/cargo-test.html @@ -0,0 +1,528 @@ +

NAME

+
+

cargo-test - Execute unit and integration tests of a package

+
+
+

SYNOPSIS

+
+
+

cargo test [OPTIONS] [TESTNAME] [-- TEST-OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Compile and execute unit and integration tests.

+
+
+

The test filtering argument TESTNAME and all the arguments following the two dashes (--) are passed to the test binaries and thus to libtest (rustc’s built-in unit-test and micro-benchmarking framework). If you’re passing arguments to both Cargo and the binary, the ones after -- go to the binary, the ones before go to Cargo. For details about libtest’s arguments see the output of cargo test -- --help. As an example, this will run all tests with foo in their name on 3 threads in parallel:

+
+
+
+
cargo test foo -- --test-threads 3
+
+
+
+

Tests are built with the --test option to rustc, which creates an executable with a main function that automatically runs all functions annotated with the #[test] attribute in multiple threads. #[bench] annotated functions will also be run with one iteration to verify that they are functional.

+
+
+

The libtest harness may be disabled by setting harness = false in the target manifest settings, in which case your code will need to provide its own main function to handle running tests.

+
+
+

Documentation tests are also run by default, which is handled by rustdoc. It extracts code samples from documentation comments and executes them. See the rustdoc book for more information on writing doc tests.

+
+
+
+
+

OPTIONS

+
+
+

Test Options

+
+
+
--no-run
+
+

Compile, but don’t run tests.

+
+
--no-fail-fast
+
+

Run all tests regardless of failure. Without this flag, Cargo will exit after the first executable fails. The Rust test harness will run all tests within the executable to completion; this flag only applies to the executable as a whole.

+
+
+
+
+
+

Package Selection

+
+

By default, when no package selection options are given, the packages selected depend on the current working directory. In the root of a virtual workspace, all workspace members are selected (--all is implied). Otherwise, only the package in the current directory will be selected. The default packages may be overridden with the workspace.default-members key in the root Cargo.toml manifest.

+
+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Test only the specified packages. See cargo-pkgid(1) for the SPEC format. This flag may be specified multiple times.

+
+
--all
+
+

Test all members in the workspace.

+
+
--exclude SPEC…​
+
+

Exclude the specified packages. Must be used in conjunction with the --all flag. This flag may be specified multiple times.
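As a sketch, testing every workspace member except one (the member name experimental is a hypothetical stand-in):

    cargo test --all --exclude experimental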

+
+
+
+
+
+

Target Selection

+
+

When no target selection options are given, cargo test will build the following targets of the selected packages:

  • lib — used to link with binaries, examples, integration tests, and doc tests
  • bins (only if integration tests are built and required features are available)
  • examples — to ensure they compile
  • lib as a unit test
  • bins as unit tests
  • integration tests
  • doc tests for the lib target
+
+
+

The default behavior can be changed by setting the test flag for the target in the manifest settings. Setting examples to test = true will build and run the example as a test. Setting targets to test = false will stop them from being tested by default. Target selection options that take a target by name ignore the test flag and will always test the given target.

+
+
+

Doc tests for libraries may be disabled by setting doctest = false for the library in the manifest.

+
+
+

Passing target selection flags will test only the specified targets.

+
+
+
+
--lib
+
+

Test the package’s library.

+
+
--bin NAME…​
+
+

Test the specified binary. This flag may be specified multiple times.

+
+
--bins
+
+

Test all binary targets.

+
+
--example NAME…​
+
+

Test the specified example. This flag may be specified multiple times.

+
+
--examples
+
+

Test all example targets.

+
+
--test NAME…​
+
+

Test the specified integration test. This flag may be specified multiple times.

+
+
--tests
+
+

Test all targets in test mode that have the test = true manifest flag set. By default this includes the library and binaries built as unittests, and integration tests. Be aware that this will also build any required dependencies, so the lib target may be built twice (once as a unittest, and once as a dependency for binaries, integration tests, etc.). Targets may be enabled or disabled by setting the test flag in the manifest settings for the target.

+
+
--bench NAME…​
+
+

Test the specified benchmark. This flag may be specified multiple times.

+
+
--benches
+
+

Test all targets in benchmark mode that have the bench = true manifest flag set. By default this includes the library and binaries built as benchmarks, and bench targets. Be aware that this will also build any required dependencies, so the lib target may be built twice (once as a benchmark, and once as a dependency for binaries, benchmarks, etc.). Targets may be enabled or disabled by setting the bench flag in the manifest settings for the target.

+
+
--all-targets
+
+

Test all targets. This is equivalent to specifying --lib --bins --tests --benches --examples.

+
+
--doc
+
+

Test only the library’s documentation. This cannot be mixed with other target options.
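A minimal sketch of running the documentation tests alone:

    cargo test --doc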

+
+
+
+
+
+

Feature Selection

+
+

When no feature options are given, the default feature is activated for every selected package.

+
+
+
+
--features FEATURES
+
+

Space or comma separated list of features to activate. These features only apply to the current directory’s package. Features of direct dependencies may be enabled with <dep-name>/<feature-name> syntax.

+
+
--all-features
+
+

Activate all available features of all selected packages.

+
+
--no-default-features
+
+

Do not activate the default feature of the current directory’s package.

+
+
+
+
+
+

Compilation Options

+
+
+
--target TRIPLE
+
+

Test for the given architecture. The default is the host architecture. The general format of the triple is <arch><sub>-<vendor>-<sys>-<abi>. Run rustc --print target-list for a list of supported targets.

+
+

This may also be specified with the build.target config value.

+
+
+
--release
+
+

Test optimized artifacts with the release profile. See the PROFILES section for details on how this affects profile selection.

+
+
+
+
+
+

Output Options

+
+
+
--target-dir DIRECTORY
+
+

Directory for all generated artifacts and intermediate files. May also be specified with the CARGO_TARGET_DIR environment variable, or the build.target-dir config value. Defaults to target in the root of the workspace.

+
+
+
+
+
+

Display Options

+
+

By default the Rust test harness hides output from test execution to keep results readable. Test output can be recovered (e.g., for debugging) by passing --nocapture to the test binaries:

+
+
+
+
cargo test -- --nocapture
+
+
+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
--message-format FMT
+
+

The output format for diagnostic messages. Valid values:

  • human (default): Display in a human-readable text format.
  • json: Emit JSON messages to stdout.
  • short: Emit shorter, human-readable text messages.
+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+

Miscellaneous Options

+
+

The --jobs argument affects the building of the test executable but does not affect how many threads are used when running the tests. The Rust test harness includes an option to control the number of threads used:

+
+
+
+
cargo test -j 2 -- --test-threads=2
+
+
+
+
+
-j N
+
--jobs N
+
+

Number of parallel jobs to run. May also be specified with the build.jobs config value. Defaults to the number of CPUs.

+
+
+
+
+
+
+
+

PROFILES

+
+
+

Profiles may be used to configure compiler options such as optimization levels and debug settings. See the reference for more details.

+
+
+

Profile selection depends on the target and crate being built. By default the dev or test profiles are used. If the --release flag is given, then the release or bench profiles are used.

+
Target                                                 Default Profile   --release Profile
lib, bin, example                                      dev               release
test, bench, or any target in "test" or "bench" mode   test              bench

+
+

Dependencies use the dev/release profiles.

+
+
+

Unit tests are separate executable artifacts which use the test/bench profiles. Example targets are built the same as with cargo build (using the dev/release profiles) unless you are building them with the test harness (by setting test = true in the manifest or using the --example flag) in which case they use the test/bench profiles. Library targets are built with the dev/release profiles when linked to an integration test, binary, or doctest.

+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Execute all the unit and integration tests of the current package:

     cargo test

  2. Run only a specific test within a specific integration test:

     cargo test --test int_test_name -- modname::test_name
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-uninstall.html b/src/doc/man/generated/cargo-uninstall.html new file mode 100644 index 000000000..bde65fc38 --- /dev/null +++ b/src/doc/man/generated/cargo-uninstall.html @@ -0,0 +1,183 @@ +

NAME

+
+

cargo-uninstall - Remove a Rust binary

+
+
+

SYNOPSIS

+
+
+

cargo uninstall [OPTIONS] [SPEC…​]

+
+
+
+
+

DESCRIPTION

+
+
+

This command removes a package installed with cargo-install(1). The SPEC argument is a package ID specification of the package to remove (see cargo-pkgid(1)).

+
+
+

By default all binaries are removed for a crate, but the --bin and --example flags can be used to remove only particular binaries.

+
+
+

The installation root is determined, in order of precedence:

  • --root option
  • CARGO_INSTALL_ROOT environment variable
  • install.root Cargo config value
  • CARGO_HOME environment variable
  • $HOME/.cargo
+
+
+
+
+

OPTIONS

+
+
+

Install Options

+
+
+
-p
+
--package SPEC…​
+
+

Package to uninstall.

+
+
--bin NAME…​
+
+

Only uninstall the binary NAME.

+
+
--root DIR
+
+

Directory to uninstall packages from.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Uninstall a previously installed package:

     cargo uninstall ripgrep
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-update.html b/src/doc/man/generated/cargo-update.html new file mode 100644 index 000000000..4c6be6a81 --- /dev/null +++ b/src/doc/man/generated/cargo-update.html @@ -0,0 +1,216 @@ +

NAME

+
+

cargo-update - Update dependencies as recorded in the local lock file

+
+
+

SYNOPSIS

+
+
+

cargo update [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will update dependencies in the Cargo.lock file to the latest version. It requires that the Cargo.lock file already exists as generated by commands such as cargo-build(1) or cargo-generate-lockfile(1).

+
+
+
+
+

OPTIONS

+
+
+

Update Options

+
+
+
-p SPEC…​
+
--package SPEC…​
+
+

Update only the specified packages. This flag may be specified multiple times. See cargo-pkgid(1) for the SPEC format.

+
+

If packages are specified with the -p flag, then a conservative update of the lockfile will be performed. This means that only the dependency specified by SPEC will be updated. Its transitive dependencies will be updated only if SPEC cannot be updated without updating dependencies. All other dependencies will remain locked at their currently recorded versions.

+
+
+

If -p is not specified, all dependencies are updated.

+
+
+
--aggressive
+
+

When used with -p, dependencies of SPEC are forced to update as well. Cannot be used with --precise.
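For example, a sketch that forces the dependencies of one package to update too (serde stands in for any dependency in the lockfile):

    cargo update -p serde --aggressive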

+
+
--precise PRECISE
+
+

When used with -p, allows you to specify a specific version number to set the package to. If the package comes from a git repository, this can be a git revision (such as a SHA hash or tag).

+
+
--dry-run
+
+

Displays what would be updated, but doesn’t actually write the lockfile.
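A quick sketch of previewing the result of a full update without writing Cargo.lock:

    cargo update --dry-run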

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Update all dependencies in the lockfile:

     cargo update

  2. Update only specific dependencies:

     cargo update -p foo -p bar

  3. Set a specific dependency to a specific version:

     cargo update -p foo --precise 1.2.3
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-verify-project.html b/src/doc/man/generated/cargo-verify-project.html new file mode 100644 index 000000000..f523d0376 --- /dev/null +++ b/src/doc/man/generated/cargo-verify-project.html @@ -0,0 +1,174 @@ +

NAME

+
+

cargo-verify-project - Check correctness of crate manifest

+
+
+

SYNOPSIS

+
+
+

cargo verify-project [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

This command will parse the local manifest and check its validity. It emits a JSON object with the result. A successful validation will display:

+
+
+
+
{"success":"true"}
+
+
+
+

An invalid workspace will display:

+
+
+
+
{"invalid":"human-readable error message"}
+
+
+
+
+
+

OPTIONS

+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--manifest-path PATH
+
+

Path to the Cargo.toml file. By default, Cargo searches in the current directory or any parent directory for the Cargo.toml file.

+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

The workspace is OK.

+
+
1
+
+

The workspace is invalid.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Check the current workspace for errors:

     cargo verify-project
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-version.html b/src/doc/man/generated/cargo-version.html new file mode 100644 index 000000000..96332d4ea --- /dev/null +++ b/src/doc/man/generated/cargo-version.html @@ -0,0 +1,76 @@ +

NAME

+
+

cargo-version - Show version information

+
+
+

SYNOPSIS

+
+
+

cargo version [OPTIONS]

+
+
+
+
+

DESCRIPTION

+
+
+

Displays the version of Cargo.

+
+
+
+
+

OPTIONS

+
+
+
+
-v
+
--verbose
+
+

Display additional version information.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Display the version:

     cargo version

  2. The version is also available via flags:

     cargo --version
     cargo -V

  3. Display extra version information:

     cargo -Vv
+
+
+
+
+

SEE ALSO

+
+ +
+
\ No newline at end of file diff --git a/src/doc/man/generated/cargo-yank.html b/src/doc/man/generated/cargo-yank.html new file mode 100644 index 000000000..4e1d00965 --- /dev/null +++ b/src/doc/man/generated/cargo-yank.html @@ -0,0 +1,188 @@ +

NAME

+
+

cargo-yank - Remove a pushed crate from the index

+
+
+

SYNOPSIS

+
+
+

cargo yank [OPTIONS] --vers VERSION [CRATE]

+
+
+
+
+

DESCRIPTION

+
+
+

The yank command removes a previously published crate’s version from the server’s index. This command does not delete any data, and the crate will still be available for download via the registry’s download link.

+
+
+

Note that existing crates locked to a yanked version will still be able to download the yanked version to use it. Cargo will, however, not allow any new crates to be locked to any yanked version.

+
+
+

This command requires you to be authenticated with either the --token option or using cargo-login(1).

+
+
+

If the crate name is not specified, it will use the package name from the current directory.

+
+
+
+
+

OPTIONS

+
+
+

Yank Options

+
+
+
--vers VERSION
+
+

The version to yank or un-yank.

+
+
--undo
+
+

Undo a yank, putting a version back into the index.
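For instance, a sketch that reverses an earlier yank (the crate name foo and version mirror the example at the end of this page):

    cargo yank --vers 1.0.7 --undo foo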

+
+
--token TOKEN
+
+

API token to use when authenticating. This overrides the token stored in the credentials file (which is created by cargo-login(1)).

+
+

Cargo config environment variables can be used to override the tokens stored in the credentials file. The token for crates.io may be specified with the CARGO_REGISTRY_TOKEN environment variable. Tokens for other registries may be specified with environment variables of the form CARGO_REGISTRIES_NAME_TOKEN where NAME is the name of the registry in all capital letters.
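A sketch of supplying the crates.io token through the environment rather than the credentials file (the token value is a placeholder):

    CARGO_REGISTRY_TOKEN=abc123 cargo yank --vers 1.0.7 foo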

+
+
+
--index INDEX
+
+

The URL of the registry index to use.

+
+
--registry REGISTRY
+
+

Name of the registry to use. Registry names are defined in Cargo config files. If not specified, the default registry is used, which is defined by the registry.default config key which defaults to crates-io.

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Yank a crate from the index:

     cargo yank --vers 1.0.7 foo
+
+
+
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/generated/cargo.html b/src/doc/man/generated/cargo.html new file mode 100644 index 000000000..92cfbfdd3 --- /dev/null +++ b/src/doc/man/generated/cargo.html @@ -0,0 +1,433 @@ +

NAME

+
+

cargo - The Rust package manager

+
+
+

SYNOPSIS

+
+
+

cargo [OPTIONS] COMMAND [ARGS]
cargo [OPTIONS] --version
cargo [OPTIONS] --list
cargo [OPTIONS] --help
cargo [OPTIONS] --explain CODE

+
+
+
+
+

DESCRIPTION

+
+
+

This program is a package manager and build tool for the Rust language, available at http://rust-lang.org.

+
+
+
+
+

COMMANDS

+
+
+

Build Commands

+
+
+
cargo-bench(1)
+
+

Execute benchmarks of a package.

+
+
cargo-build(1)
+
+

Compile a package.

+
+
cargo-check(1)
+
+

Check a local package and all of its dependencies for errors.

+
+
cargo-clean(1)
+
+

Remove artifacts that Cargo has generated in the past.

+
+
cargo-doc(1)
+
+

Build a package’s documentation.

+
+
cargo-fetch(1)
+
+

Fetch dependencies of a package from the network.

+
+
cargo-fix(1)
+
+

Automatically fix lint warnings reported by rustc.

+
+
cargo-run(1)
+
+

Run a binary or example of the local package.

+
+
cargo-rustc(1)
+
+

Compile a package, and pass extra options to the compiler.

+
+
cargo-rustdoc(1)
+
+

Build a package’s documentation, using specified custom flags.

+
+
cargo-test(1)
+
+

Execute unit and integration tests of a package.

+
+
+
+
+
+

Manifest Commands

+
+
+
cargo-generate-lockfile(1)
+
+

Generate Cargo.lock for a project.

+
+
cargo-locate-project(1)
+
+

Print a JSON representation of a Cargo.toml file’s location.

+
+
cargo-metadata(1)
+
+

Output the resolved dependencies of a package, the concrete used versions including overrides, in machine-readable format.

+
+
cargo-pkgid(1)
+
+

Print a fully qualified package specification.

+
+
cargo-update(1)
+
+

Update dependencies as recorded in the local lock file.

+
+
cargo-verify-project(1)
+
+

Check correctness of crate manifest.

+
+
+
+
+
+

Package Commands

+
+
+
cargo-init(1)
+
+

Create a new Cargo package in an existing directory.

+
+
cargo-install(1)
+
+

Build and install a Rust binary.

+
+
cargo-new(1)
+
+

Create a new Cargo package.

+
+
cargo-search(1)
+
+

Search packages in crates.io.

+
+
cargo-uninstall(1)
+
+

Remove a Rust binary.

+
+
+
+
+
+

Publishing Commands

+
+
+
cargo-login(1)
+
+

Save an API token from the registry locally.

+
+
cargo-owner(1)
+
+

Manage the owners of a crate on the registry.

+
+
cargo-package(1)
+
+

Assemble the local package into a distributable tarball.

+
+
cargo-publish(1)
+
+

Upload a package to the registry.

+
+
cargo-yank(1)
+
+

Remove a pushed crate from the index.

+
+
+
+
+
+

General Commands

+
+
+
cargo-help(1)
+
+

Display help information about Cargo.

+
+
cargo-version(1)
+
+

Show version information.

+
+
+
+
+
+
+
+

OPTIONS

+
+
+

Special Options

+
+
+
-V
+
--version
+
+

Print version info and exit. If used with --verbose, prints extra information.

+
+
--list
+
+

List all installed Cargo subcommands. If used with --verbose, prints extra information.

+
+
--explain CODE
+
+

Run rustc --explain CODE which will print out a detailed explanation of an error message (for example, E0004).
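For example, a short sketch asking for the explanation of the error code cited above:

    cargo --explain E0004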

+
+
+
+
+
+

Display Options

+
+
+
-v
+
--verbose
+
+

Use verbose output. May be specified twice for "very verbose" output which includes extra output such as dependency warnings and build script output. May also be specified with the term.verbose config value.

+
+
-q
+
--quiet
+
+

No output printed to stdout.

+
+
--color WHEN
+
+

Control when colored output is used. Valid values:

  • auto (default): Automatically detect if color support is available on the terminal.
  • always: Always display colors.
  • never: Never display colors.

May also be specified with the term.color config value.

+
+
+
+
+
+
+

Manifest Options

+
+
+
--frozen
+
--locked
+
+

Either of these flags requires that the Cargo.lock file is up-to-date. If the lock file is missing, or it needs to be updated, Cargo will exit with an error. The --frozen flag also prevents Cargo from attempting to access the network to determine if it is out-of-date.

These may be used in environments where you want to assert that the Cargo.lock file is up-to-date (such as a CI build) or want to avoid network access.

+
+
+
+
+
+
+

Common Options

+
+
+
-h
+
--help
+
+

Prints help information.

+
+
-Z FLAG…​
+
+

Unstable (nightly-only) flags to Cargo. Run cargo -Z help for details.

+
+
+
+
+
+
+
+

ENVIRONMENT

+
+
+

See the reference for details on environment variables that Cargo reads.

+
+
+
+
+

Exit Status

+
+
+
+
0
+
+

Cargo succeeded.

+
+
101
+
+

Cargo failed to complete.

+
+
+
+
+
+
+

FILES

+
+
+
+
~/.cargo/
+
+

Default location for Cargo’s "home" directory where it stores various files. The location can be changed with the CARGO_HOME environment variable.

+
+
$CARGO_HOME/bin/
+
+

Binaries installed by cargo-install(1) will be located here. If using rustup, executables distributed with Rust are also located here.

+
+
$CARGO_HOME/config
+
+

The global configuration file. See the reference for more information about configuration files.

+
+
.cargo/config
+
+

Cargo automatically searches for a file named .cargo/config in the current directory, and all parent directories. These configuration files will be merged with the global configuration file.

+
+
$CARGO_HOME/credentials
+
+

Private authentication information for logging in to a registry.

+
+
$CARGO_HOME/registry/
+
+

This directory contains cached downloads of the registry index and any downloaded dependencies.

+
+
$CARGO_HOME/git/
+
+

This directory contains cached downloads of git dependencies.

+
+
+
+
+
+
+

EXAMPLES

+
+
+
  1. Build a local package and all of its dependencies:

     cargo build

  2. Build a package with optimizations:

     cargo build --release

  3. Run tests for a cross-compiled target:

     cargo test --target i686-unknown-linux-gnu

  4. Create a new package that builds an executable:

     cargo new foobar

  5. Create a package in the current directory:

     mkdir foo && cd foo
     cargo init .

  6. Learn about a command’s options and usage:

     cargo help clean
+
+
+
+
+

BUGS

+ +
+
+

SEE ALSO

+ +
\ No newline at end of file diff --git a/src/doc/man/options-common.adoc b/src/doc/man/options-common.adoc new file mode 100644 index 000000000..b8c2fce38 --- /dev/null +++ b/src/doc/man/options-common.adoc @@ -0,0 +1,7 @@ +*-h*:: +*--help*:: + Prints help information. + +*-Z* _FLAG_...:: + Unstable (nightly-only) flags to Cargo. Run `cargo -Z help` for + details. diff --git a/src/doc/man/options-display.adoc b/src/doc/man/options-display.adoc new file mode 100644 index 000000000..cc2e22633 --- /dev/null +++ b/src/doc/man/options-display.adoc @@ -0,0 +1,22 @@ +*-v*:: +*--verbose*:: + Use verbose output. May be specified twice for "very verbose" output which + includes extra output such as dependency warnings and build script output. + May also be specified with the `term.verbose` + linkcargo:reference/config.html[config value]. + +*-q*:: +*--quiet*:: + No output printed to stdout. + +*--color* _WHEN_:: + Control when colored output is used. Valid values: ++ +- `auto` (default): Automatically detect if color support is available on the + terminal. +- `always`: Always display colors. +- `never`: Never display colors. + ++ +May also be specified with the `term.color` +linkcargo:reference/config.html[config value]. diff --git a/src/doc/man/options-features.adoc b/src/doc/man/options-features.adoc new file mode 100644 index 000000000..666ef21e4 --- /dev/null +++ b/src/doc/man/options-features.adoc @@ -0,0 +1,16 @@ +=== Feature Selection + +When no feature options are given, the `default` feature is activated for +every selected package. + +*--features* _FEATURES_:: + Space or comma separated list of features to activate. These features only + apply to the current directory's package. Features of direct dependencies + may be enabled with `/` syntax. + +*--all-features*:: + Activate all available features of all selected packages. + +*--no-default-features*:: + Do not activate the `default` feature of the current directory's + package. diff --git a/src/doc/man/options-index.adoc b/src/doc/man/options-index.adoc new file mode 100644 index 000000000..1321866ba --- /dev/null +++ b/src/doc/man/options-index.adoc @@ -0,0 +1,2 @@ +*--index* _INDEX_:: + The URL of the registry index to use. diff --git a/src/doc/man/options-jobs.adoc b/src/doc/man/options-jobs.adoc new file mode 100644 index 000000000..9d817426b --- /dev/null +++ b/src/doc/man/options-jobs.adoc @@ -0,0 +1,5 @@ +*-j* _N_:: +*--jobs* _N_:: + Number of parallel jobs to run. May also be specified with the + `build.jobs` linkcargo:reference/config.html[config value]. Defaults to + the number of CPUs. diff --git a/src/doc/man/options-locked.adoc b/src/doc/man/options-locked.adoc new file mode 100644 index 000000000..e783f5fcd --- /dev/null +++ b/src/doc/man/options-locked.adoc @@ -0,0 +1,10 @@ +*--frozen*:: +*--locked*:: + Either of these flags requires that the `Cargo.lock` file is + up-to-date. If the lock file is missing, or it needs to be updated, Cargo will + exit with an error. The `--frozen` flag also prevents Cargo from + attempting to access the network to determine if it is out-of-date. ++ +These may be used in environments where you want to assert that the +`Cargo.lock` file is up-to-date (such as a CI build) or want to avoid network +access. diff --git a/src/doc/man/options-manifest-path.adoc b/src/doc/man/options-manifest-path.adoc new file mode 100644 index 000000000..1bc4d80eb --- /dev/null +++ b/src/doc/man/options-manifest-path.adoc @@ -0,0 +1,3 @@ +*--manifest-path* _PATH_:: + Path to the `Cargo.toml` file. 
By default, Cargo searches in the current + directory or any parent directory for the `Cargo.toml` file. diff --git a/src/doc/man/options-message-format.adoc b/src/doc/man/options-message-format.adoc new file mode 100644 index 000000000..6da9c26bd --- /dev/null +++ b/src/doc/man/options-message-format.adoc @@ -0,0 +1,6 @@ +*--message-format* _FMT_:: + The output format for diagnostic messages. Valid values: ++ +- `human` (default): Display in a human-readable text format. +- `json`: Emit JSON messages to stdout. +- `short`: Emit shorter, human-readable text messages. diff --git a/src/doc/man/options-new.adoc b/src/doc/man/options-new.adoc new file mode 100644 index 000000000..2218599ae --- /dev/null +++ b/src/doc/man/options-new.adoc @@ -0,0 +1,29 @@ +*--bin*:: + Create a package with a binary target (`src/main.rs`). + This is the default behavior. + +*--lib*:: + Create a package with a library target (`src/lib.rs`). + +*--edition* _EDITION_:: + Specify the Rust edition to use. Default is 2018. + Possible values: 2015, 2018 + +*--name* _NAME_:: + Set the package name. Defaults to the directory name. + +*--vcs* _VCS_:: + Initialize a new VCS repository for the given version control system (git, + hg, pijul, or fossil) or do not initialize any version control at all + (none). If not specified, defaults to `git` or the configuration value + `cargo-new.vcs`, or `none` if already inside a VCS repository. + +*--registry* _REGISTRY_:: + This sets the `publish` field in `Cargo.toml` to the given registry name + which will restrict publishing only to that registry. ++ +Registry names are defined in linkcargo:reference/config.html[Cargo config files]. +If not specified, the default registry defined by the `registry.default` +config key is used. If the default registry is not set and `--registry` is not +used, the `publish` field will not be set which means that publishing will not +be restricted. diff --git a/src/doc/man/options-package.adoc b/src/doc/man/options-package.adoc new file mode 100644 index 000000000..c0cfbc35e --- /dev/null +++ b/src/doc/man/options-package.adoc @@ -0,0 +1,7 @@ +By default, the package in the current working directory is selected. The `-p` +flag can be used to choose a different package in a workspace. + +*-p* _SPEC_:: +*--package* _SPEC_:: + The package to convert:lowercase[{actionverb}]. See man:cargo-pkgid[1] for + the SPEC format. diff --git a/src/doc/man/options-packages.adoc b/src/doc/man/options-packages.adoc new file mode 100644 index 000000000..51ab9f0c3 --- /dev/null +++ b/src/doc/man/options-packages.adoc @@ -0,0 +1,18 @@ +By default, when no package selection options are given, the packages selected +depend on the current working directory. In the root of a virtual workspace, +all workspace members are selected (`--all` is implied). Otherwise, only the +package in the current directory will be selected. The default packages may be +overridden with the `workspace.default-members` key in the root `Cargo.toml` +manifest. + +*-p* _SPEC_...:: +*--package* _SPEC_...:: + {actionverb} only the specified packages. See man:cargo-pkgid[1] for the + SPEC format. This flag may be specified multiple times. + +*--all*:: + {actionverb} all members in the workspace. + +*--exclude* _SPEC_...:: + Exclude the specified packages. Must be used in conjunction with the + `--all` flag. This flag may be specified multiple times. 
diff --git a/src/doc/man/options-profile.adoc b/src/doc/man/options-profile.adoc new file mode 100644 index 000000000..3c5ad14c7 --- /dev/null +++ b/src/doc/man/options-profile.adoc @@ -0,0 +1,6 @@ +*--profile* _NAME_:: + Changes convert:lowercase[{actionverb}] behavior. Currently only `test` is + supported, which will convert:lowercase[{actionverb}] with the + `#[cfg(test)]` attribute enabled. This is useful to have it + convert:lowercase[{actionverb}] unit tests which are usually excluded via + the `cfg` attribute. This does not change the actual profile used. diff --git a/src/doc/man/options-registry.adoc b/src/doc/man/options-registry.adoc new file mode 100644 index 000000000..a0c4c27c8 --- /dev/null +++ b/src/doc/man/options-registry.adoc @@ -0,0 +1,4 @@ +*--registry* _REGISTRY_:: + Name of the registry to use. Registry names are defined in linkcargo:reference/config.html[Cargo config files]. + If not specified, the default registry is used, which is defined by the + `registry.default` config key which defaults to `crates-io`. diff --git a/src/doc/man/options-release.adoc b/src/doc/man/options-release.adoc new file mode 100644 index 000000000..e99539172 --- /dev/null +++ b/src/doc/man/options-release.adoc @@ -0,0 +1,3 @@ +*--release*:: + {actionverb} optimized artifacts with the `release` profile. See the + <> section for details on how this affects profile selection. diff --git a/src/doc/man/options-target-dir.adoc b/src/doc/man/options-target-dir.adoc new file mode 100644 index 000000000..f044bd712 --- /dev/null +++ b/src/doc/man/options-target-dir.adoc @@ -0,0 +1,5 @@ +*--target-dir* _DIRECTORY_:: + Directory for all generated artifacts and intermediate files. May also be + specified with the `CARGO_TARGET_DIR` environment variable, or the + `build.target-dir` linkcargo:reference/config.html[config value]. Defaults + to `target` in the root of the workspace. diff --git a/src/doc/man/options-target-triple.adoc b/src/doc/man/options-target-triple.adoc new file mode 100644 index 000000000..eac97d885 --- /dev/null +++ b/src/doc/man/options-target-triple.adoc @@ -0,0 +1,8 @@ +*--target* _TRIPLE_:: + {actionverb} for the given architecture. The default is the host + architecture. The general format of the triple is + `---`. Run `rustc --print target-list` for a + list of supported targets. ++ +This may also be specified with the `build.target` +linkcargo:reference/config.html[config value]. diff --git a/src/doc/man/options-targets-lib-bin.adoc b/src/doc/man/options-targets-lib-bin.adoc new file mode 100644 index 000000000..8668ba84b --- /dev/null +++ b/src/doc/man/options-targets-lib-bin.adoc @@ -0,0 +1,8 @@ +*--lib*:: + {actionverb} the package's library. + +*--bin* _NAME_...:: + {actionverb} the specified binary. This flag may be specified multiple times. + +*--bins*:: + {actionverb} all binary targets. diff --git a/src/doc/man/options-targets.adoc b/src/doc/man/options-targets.adoc new file mode 100644 index 000000000..6a8a46cd7 --- /dev/null +++ b/src/doc/man/options-targets.adoc @@ -0,0 +1,39 @@ +Passing target selection flags will convert:lowercase[{actionverb}] only the +specified targets. + +include::options-targets-lib-bin.adoc[] + +*--example* _NAME_...:: + {actionverb} the specified example. This flag may be specified multiple times. + +*--examples*:: + {actionverb} all example targets. + +*--test* _NAME_...:: + {actionverb} the specified integration test. This flag may be specified multiple + times. 
+ +*--tests*:: + {actionverb} all targets in test mode that have the `test = true` manifest + flag set. By default this includes the library and binaries built as + unittests, and integration tests. Be aware that this will also build any + required dependencies, so the lib target may be built twice (once as a + unittest, and once as a dependency for binaries, integration tests, etc.). + Targets may be enabled or disabled by setting the `test` flag in the + manifest settings for the target. + +*--bench* _NAME_...:: + {actionverb} the specified benchmark. This flag may be specified multiple times. + +*--benches*:: + {actionverb} all targets in benchmark mode that have the `bench = true` + manifest flag set. By default this includes the library and binaries built + as benchmarks, and bench targets. Be aware that this will also build any + required dependencies, so the lib target may be built twice (once as a + benchmark, and once as a dependency for binaries, benchmarks, etc.). + Targets may be enabled or disabled by setting the `bench` flag in the + manifest settings for the target. + +*--all-targets*:: + {actionverb} all targets. This is equivalent to specifying `--lib --bins + --tests --benches --examples`. diff --git a/src/doc/man/options-test.adoc b/src/doc/man/options-test.adoc new file mode 100644 index 000000000..0cdcb3d7e --- /dev/null +++ b/src/doc/man/options-test.adoc @@ -0,0 +1,8 @@ +*--no-run*:: + Compile, but don't run {nouns}. + +*--no-fail-fast*:: + Run all {nouns} regardless of failure. Without this flag, Cargo will exit + after the first executable fails. The Rust test harness will run all + {nouns} within the executable to completion, this flag only applies to + the executable as a whole. diff --git a/src/doc/man/options-token.adoc b/src/doc/man/options-token.adoc new file mode 100644 index 000000000..5f25ffbf2 --- /dev/null +++ b/src/doc/man/options-token.adoc @@ -0,0 +1,10 @@ +*--token* _TOKEN_:: + API token to use when authenticating. This overrides the token stored in + the credentials file (which is created by man:cargo-login[1]). ++ +linkcargo:reference/config.html[Cargo config] environment variables can be +used to override the tokens stored in the credentials file. The token for +crates.io may be specified with the `CARGO_REGISTRY_TOKEN` environment +variable. Tokens for other registries may be specified with environment +variables of the form `CARGO_REGISTRIES_NAME_TOKEN` where `NAME` is the name +of the registry in all capital letters. diff --git a/src/doc/man/section-environment.adoc b/src/doc/man/section-environment.adoc new file mode 100644 index 000000000..5cc69e995 --- /dev/null +++ b/src/doc/man/section-environment.adoc @@ -0,0 +1,4 @@ +== ENVIRONMENT + +See linkcargo:reference/environment-variables.html[the reference] for +details on environment variables that Cargo reads. diff --git a/src/doc/man/section-exit-status.adoc b/src/doc/man/section-exit-status.adoc new file mode 100644 index 000000000..427b3903c --- /dev/null +++ b/src/doc/man/section-exit-status.adoc @@ -0,0 +1,7 @@ +== Exit Status + +0:: + Cargo succeeded. + +101:: + Cargo failed to complete. diff --git a/src/doc/man/section-profiles.adoc b/src/doc/man/section-profiles.adoc new file mode 100644 index 000000000..02cf4acaa --- /dev/null +++ b/src/doc/man/section-profiles.adoc @@ -0,0 +1,26 @@ +== PROFILES + +Profiles may be used to configure compiler options such as optimization levels +and debug settings. 
See +linkcargo:reference/manifest.html#the-profile-sections[the reference] +for more details. + +Profile selection depends on the target and crate being built. By default the +`dev` or `test` profiles are used. If the `--release` flag is given, then the +`release` or `bench` profiles are used. + +[%autowidth] +|=== +|Target |Default Profile |`--release` Profile + +|lib, bin, example +|`dev` +|`release` + +|test, bench, or any target + + in "test" or "bench" mode +|`test` +|`bench` +|=== + +Dependencies use the `dev`/`release` profiles. diff --git a/src/doc/src/SUMMARY.md b/src/doc/src/SUMMARY.md new file mode 100644 index 000000000..3a69c0a31 --- /dev/null +++ b/src/doc/src/SUMMARY.md @@ -0,0 +1,70 @@ +# Summary + +[Introduction](index.md) + +* [Getting Started](getting-started/index.md) + * [Installation](getting-started/installation.md) + * [First Steps with Cargo](getting-started/first-steps.md) + +* [Cargo Guide](guide/index.md) + * [Why Cargo Exists](guide/why-cargo-exists.md) + * [Creating a New Package](guide/creating-a-new-project.md) + * [Working on an Existing Package](guide/working-on-an-existing-project.md) + * [Dependencies](guide/dependencies.md) + * [Package Layout](guide/project-layout.md) + * [Cargo.toml vs Cargo.lock](guide/cargo-toml-vs-cargo-lock.md) + * [Tests](guide/tests.md) + * [Continuous Integration](guide/continuous-integration.md) + * [Build Cache](guide/build-cache.md) + +* [Cargo Reference](reference/index.md) + * [Specifying Dependencies](reference/specifying-dependencies.md) + * [The Manifest Format](reference/manifest.md) + * [Configuration](reference/config.md) + * [Environment Variables](reference/environment-variables.md) + * [Build Scripts](reference/build-scripts.md) + * [Publishing on crates.io](reference/publishing.md) + * [Package ID Specifications](reference/pkgid-spec.md) + * [Source Replacement](reference/source-replacement.md) + * [External Tools](reference/external-tools.md) + * [Registries](reference/registries.md) + * [Unstable Features](reference/unstable.md) + +* [Cargo Commands](commands/index.md) + * [Build Commands](commands/build-commands.md) + * [bench](commands/cargo-bench.md) + * [build](commands/cargo-build.md) + * [check](commands/cargo-check.md) + * [clean](commands/cargo-clean.md) + * [doc](commands/cargo-doc.md) + * [fetch](commands/cargo-fetch.md) + * [fix](commands/cargo-fix.md) + * [run](commands/cargo-run.md) + * [rustc](commands/cargo-rustc.md) + * [rustdoc](commands/cargo-rustdoc.md) + * [test](commands/cargo-test.md) + * [Manifest Commands](commands/manifest-commands.md) + * [generate-lockfile](commands/cargo-generate-lockfile.md) + * [locate-project](commands/cargo-locate-project.md) + * [metadata](commands/cargo-metadata.md) + * [pkgid](commands/cargo-pkgid.md) + * [update](commands/cargo-update.md) + * [verify-project](commands/cargo-verify-project.md) + * [Package Commands](commands/package-commands.md) + * [init](commands/cargo-init.md) + * [install](commands/cargo-install.md) + * [new](commands/cargo-new.md) + * [search](commands/cargo-search.md) + * [uninstall](commands/cargo-uninstall.md) + * [Publishing Commands](commands/publishing-commands.md) + * [login](commands/cargo-login.md) + * [owner](commands/cargo-owner.md) + * [package](commands/cargo-package.md) + * [publish](commands/cargo-publish.md) + * [yank](commands/cargo-yank.md) + * [General Commands](commands/general-commands.md) + * [help](commands/cargo-help.md) + * [version](commands/cargo-version.md) + +* [FAQ](faq.md) +* [Appendix: 
Glossary](appendix/glossary.md) diff --git a/src/doc/src/appendix/glossary.md b/src/doc/src/appendix/glossary.md new file mode 100644 index 000000000..fe2f888de --- /dev/null +++ b/src/doc/src/appendix/glossary.md @@ -0,0 +1,190 @@ +# Glossary + +### Artifact + +An *artifact* is the file or set of files created as a result of the +compilation process. This includes linkable libraries and executable binaries. + +### Crate + +Every target in a package is a *crate*. Crates are either libraries or +executable binaries. It may loosely refer to either the source code of the +target, or the compiled artifact that the target produces. A crate may also +refer to a compressed package fetched from a registry. + +### Edition + +A *Rust edition* is a developmental landmark of the Rust language. The +[edition of a package][edition-field] is specified in the `Cargo.toml` +manifest, and individual targets can specify which edition they use. See the +[Edition Guide] for more information. + +### Feature + +A [*feature*][feature] is a named flag which allows for conditional +compilation. A feature can refer to an optional dependency, or an arbitrary +name defined in a `Cargo.toml` manifest that can be checked within source +code. + +Cargo has [*unstable feature flags*][cargo-unstable] which can be used to +enable experimental behavior of Cargo itself. The Rust compiler and Rustdoc +also have their own unstable feature flags (see [The Unstable +Book][unstable-book] and [The Rustdoc Book][rustdoc-unstable]). + +### Index + +The index is the searchable list of crates in a registry. + +### Lock file + +The `Cargo.lock` *lock file* is a file that captures the exact version of +every dependency used in a workspace or package. It is automatically generated +by Cargo. See [Cargo.toml vs Cargo.lock]. + +### Manifest + +A [*manifest*][manifest] is a description of a package or a workspace in a +file named `Cargo.toml`. + +A [*virtual manifest*][virtual] is a `Cargo.toml` file that only describes a +workspace, and does not include a package. + +### Member + +A *member* is a package that belongs to a workspace. + +### Package + +A *package* is a collection of source files and a `Cargo.toml` manifest which +describes the package. A package has a name and version which is used for +specifying dependencies between packages. A package contains multiple targets, +which are either libraries or executable binaries. + +The *package root* is the directory where the package's `Cargo.toml` manifest +is located. + +The [*package ID specification*][pkgid-spec], or *SPEC*, is a string used to +uniquely reference a specific version of a package from a specific source. + +### Project + +Another name for a [package](#package). + +### Registry + +A *registry* is a service that contains a collection of downloadable crates +that can be installed or used as dependencies for a package. The default +registry is [crates.io](https://crates.io). The registry has an *index* which +contains a list of all crates, and tells Cargo how to download the crates that +are needed. + +### Source + +A *source* is a provider that contains crates that may be included as +dependencies for a package. There are several kinds of sources: + +- **Registry source** — See [registry](#registry). +- **Local registry source** — A set of crates stored as compressed files on + the filesystem. See [Local Registry Sources]. +- **Directory source** — A set of crates stored as uncompressed files on the + filesystem. See [Directory Sources]. 
+- **Path source** — An individual package located on the filesystem (such as a
+  [path dependency]) or a set of multiple packages (such as [path overrides]).
+- **Git source** — Packages located in a git repository (such as a [git
+  dependency] or [git source]).
+
+See [Source Replacement] for more information.
+
+### Spec
+
+See [package ID specification](#package).
+
+### Target
+
+The meaning of the term *target* depends on the context:
+
+- **Cargo Target** — Cargo packages consist of *targets* which correspond to
+  artifacts that will be produced. Packages can have library, binary, example,
+  test, and benchmark targets. The [list of targets][targets] is configured
+  in the `Cargo.toml` manifest, often inferred automatically by the [directory
+  layout] of the source files.
+- **Target Directory** — Cargo places all built artifacts and intermediate
+  files in the *target* directory. By default this is a directory named
+  `target` at the workspace root, or the package root if not using a
+  workspace. The directory may be changed with the `--target-dir` command-line
+  option, the `CARGO_TARGET_DIR` [environment variable], or the
+  `build.target-dir` [config option].
+- **Target Architecture** — The OS and machine architecture for the built
+  artifacts are typically referred to as a *target*.
+- **Target Triple** — A triple is a specific format for specifying a target
+  architecture. Triples may be referred to as a *target triple* which is the
+  architecture for the artifact produced, and the *host triple* which is the
+  architecture that the compiler is running on. The target triple can be
+  specified with the `--target` command-line option or the `build.target`
+  [config option]. The general format of the triple is
+  `<arch><sub>-<vendor>-<sys>-<abi>` (for example,
+  `x86_64-unknown-linux-gnu`), where:
+
+  - `arch` = The base CPU architecture, for example `x86_64`, `i686`, `arm`,
+    `thumb`, `mips`, etc.
+  - `sub` = The CPU sub-architecture, for example `arm` has `v7`, `v7s`,
+    `v5te`, etc.
+  - `vendor` = The vendor, for example `unknown`, `apple`, `pc`, `linux`, etc.
+  - `sys` = The system name, for example `linux`, `windows`, etc. `none` is
+    typically used for bare-metal without an OS.
+  - `abi` = The ABI, for example `gnu`, `android`, `eabi`, etc.
+
+  Some parameters may be omitted. Run `rustc --print target-list` for a list
+  of supported targets.
+
+### Test Targets
+
+Cargo *test targets* generate binaries which help verify proper operation and
+correctness of code. There are two types of test artifacts:
+
+* **Unit test** — A *unit test* is an executable binary compiled directly from
+  a library or a binary target. It contains the entire contents of the library
+  or binary code, and runs `#[test]` annotated functions, intended to verify
+  individual units of code.
+* **Integration test target** — An [*integration test
+  target*][integration-tests] is an executable binary compiled from a *test
+  target* which is a distinct crate whose source is located in the `tests`
+  directory or specified by the [`[[test]]` table][targets] in the
+  `Cargo.toml` manifest. It is intended to only test the public API of a
+  library, or execute a binary to verify its operation.
+
+### Workspace
+
+A [*workspace*][workspace] is a collection of one or more packages that share
+common dependency resolution (with a shared `Cargo.lock`), output directory,
+and various settings such as profiles.
+
+A [*virtual workspace*][virtual] is a workspace where the root `Cargo.toml`
+manifest does not define a package, and only lists the workspace members.
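+
+For illustration, a minimal virtual manifest might look like the following
+sketch (the member names `foo` and `bar` are hypothetical):
+
+```toml
+# A virtual manifest: a [workspace] table and members, but no [package].
+[workspace]
+members = ["foo", "bar"]
+```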
+ +The *workspace root* is the directory where the workspace's `Cargo.toml` +manifest is located. + + +[Cargo.toml vs Cargo.lock]: guide/cargo-toml-vs-cargo-lock.html +[Directory Sources]: reference/source-replacement.html#directory-sources +[Local Registry Sources]: reference/source-replacement.html#local-registry-sources +[Source Replacement]: reference/source-replacement.html +[cargo-unstable]: https://doc.rust-lang.org/nightly/cargo/reference/unstable.html +[config option]: reference/config.html +[directory layout]: reference/manifest.html#the-project-layout +[edition guide]: https://rust-lang-nursery.github.io/edition-guide/ +[edition-field]: reference/manifest.html#the-edition-field-optional +[environment variable]: reference/environment-variables.html +[feature]: reference/manifest.html#the-features-section +[git dependency]: reference/specifying-dependencies.html#specifying-dependencies-from-git-repositories +[git source]: reference/source-replacement.html +[integration-tests]: reference/manifest.html#integration-tests +[manifest]: reference/manifest.html +[path dependency]: reference/specifying-dependencies.html#specifying-path-dependencies +[path overrides]: reference/specifying-dependencies.html#overriding-with-local-dependencies +[pkgid-spec]: reference/pkgid-spec.html +[rustdoc-unstable]: https://doc.rust-lang.org/nightly/rustdoc/unstable-features.html +[targets]: reference/manifest.html#configuring-a-target +[unstable-book]: https://doc.rust-lang.org/nightly/unstable-book/index.html +[virtual]: reference/manifest.html#virtual-manifest +[workspace]: reference/manifest.html#the-workspace-section diff --git a/src/doc/src/commands/build-commands.md b/src/doc/src/commands/build-commands.md new file mode 100644 index 000000000..f47265181 --- /dev/null +++ b/src/doc/src/commands/build-commands.md @@ -0,0 +1 @@ +# Build Commands diff --git a/src/doc/src/commands/cargo-bench.md b/src/doc/src/commands/cargo-bench.md new file mode 100644 index 000000000..e840cbf6a --- /dev/null +++ b/src/doc/src/commands/cargo-bench.md @@ -0,0 +1,3 @@ +# cargo bench +{{#include command-common.html}} +{{#include ../../man/generated/cargo-bench.html}} diff --git a/src/doc/src/commands/cargo-build.md b/src/doc/src/commands/cargo-build.md new file mode 100644 index 000000000..dd46f23ed --- /dev/null +++ b/src/doc/src/commands/cargo-build.md @@ -0,0 +1,3 @@ +# cargo build +{{#include command-common.html}} +{{#include ../../man/generated/cargo-build.html}} diff --git a/src/doc/src/commands/cargo-check.md b/src/doc/src/commands/cargo-check.md new file mode 100644 index 000000000..473d00afe --- /dev/null +++ b/src/doc/src/commands/cargo-check.md @@ -0,0 +1,3 @@ +# cargo check +{{#include command-common.html}} +{{#include ../../man/generated/cargo-check.html}} diff --git a/src/doc/src/commands/cargo-clean.md b/src/doc/src/commands/cargo-clean.md new file mode 100644 index 000000000..df321febe --- /dev/null +++ b/src/doc/src/commands/cargo-clean.md @@ -0,0 +1,3 @@ +# cargo clean +{{#include command-common.html}} +{{#include ../../man/generated/cargo-clean.html}} diff --git a/src/doc/src/commands/cargo-doc.md b/src/doc/src/commands/cargo-doc.md new file mode 100644 index 000000000..9cdd89704 --- /dev/null +++ b/src/doc/src/commands/cargo-doc.md @@ -0,0 +1,3 @@ +# cargo doc +{{#include command-common.html}} +{{#include ../../man/generated/cargo-doc.html}} diff --git a/src/doc/src/commands/cargo-fetch.md b/src/doc/src/commands/cargo-fetch.md new file mode 100644 index 000000000..bb5beda1e --- /dev/null +++ 
b/src/doc/src/commands/cargo-fetch.md @@ -0,0 +1,3 @@ +# cargo fetch +{{#include command-common.html}} +{{#include ../../man/generated/cargo-fetch.html}} diff --git a/src/doc/src/commands/cargo-fix.md b/src/doc/src/commands/cargo-fix.md new file mode 100644 index 000000000..66503337a --- /dev/null +++ b/src/doc/src/commands/cargo-fix.md @@ -0,0 +1,3 @@ +# cargo fix +{{#include command-common.html}} +{{#include ../../man/generated/cargo-fix.html}} diff --git a/src/doc/src/commands/cargo-generate-lockfile.md b/src/doc/src/commands/cargo-generate-lockfile.md new file mode 100644 index 000000000..57b706911 --- /dev/null +++ b/src/doc/src/commands/cargo-generate-lockfile.md @@ -0,0 +1,3 @@ +# cargo generate-lockfile +{{#include command-common.html}} +{{#include ../../man/generated/cargo-generate-lockfile.html}} diff --git a/src/doc/src/commands/cargo-help.md b/src/doc/src/commands/cargo-help.md new file mode 100644 index 000000000..7e1ba0730 --- /dev/null +++ b/src/doc/src/commands/cargo-help.md @@ -0,0 +1,3 @@ +# cargo help +{{#include command-common.html}} +{{#include ../../man/generated/cargo-help.html}} diff --git a/src/doc/src/commands/cargo-init.md b/src/doc/src/commands/cargo-init.md new file mode 100644 index 000000000..044a8b12d --- /dev/null +++ b/src/doc/src/commands/cargo-init.md @@ -0,0 +1,3 @@ +# cargo init +{{#include command-common.html}} +{{#include ../../man/generated/cargo-init.html}} diff --git a/src/doc/src/commands/cargo-install.md b/src/doc/src/commands/cargo-install.md new file mode 100644 index 000000000..5fa11a60d --- /dev/null +++ b/src/doc/src/commands/cargo-install.md @@ -0,0 +1,3 @@ +# cargo install +{{#include command-common.html}} +{{#include ../../man/generated/cargo-install.html}} diff --git a/src/doc/src/commands/cargo-locate-project.md b/src/doc/src/commands/cargo-locate-project.md new file mode 100644 index 000000000..0e42adfe4 --- /dev/null +++ b/src/doc/src/commands/cargo-locate-project.md @@ -0,0 +1,3 @@ +# cargo locate-project +{{#include command-common.html}} +{{#include ../../man/generated/cargo-locate-project.html}} diff --git a/src/doc/src/commands/cargo-login.md b/src/doc/src/commands/cargo-login.md new file mode 100644 index 000000000..5feddeead --- /dev/null +++ b/src/doc/src/commands/cargo-login.md @@ -0,0 +1,3 @@ +# cargo login +{{#include command-common.html}} +{{#include ../../man/generated/cargo-login.html}} diff --git a/src/doc/src/commands/cargo-metadata.md b/src/doc/src/commands/cargo-metadata.md new file mode 100644 index 000000000..273221395 --- /dev/null +++ b/src/doc/src/commands/cargo-metadata.md @@ -0,0 +1,3 @@ +# cargo metadata +{{#include command-common.html}} +{{#include ../../man/generated/cargo-metadata.html}} diff --git a/src/doc/src/commands/cargo-new.md b/src/doc/src/commands/cargo-new.md new file mode 100644 index 000000000..3f1a49024 --- /dev/null +++ b/src/doc/src/commands/cargo-new.md @@ -0,0 +1,3 @@ +# cargo new +{{#include command-common.html}} +{{#include ../../man/generated/cargo-new.html}} diff --git a/src/doc/src/commands/cargo-owner.md b/src/doc/src/commands/cargo-owner.md new file mode 100644 index 000000000..c1dbc9eb6 --- /dev/null +++ b/src/doc/src/commands/cargo-owner.md @@ -0,0 +1,3 @@ +# cargo owner +{{#include command-common.html}} +{{#include ../../man/generated/cargo-owner.html}} diff --git a/src/doc/src/commands/cargo-package.md b/src/doc/src/commands/cargo-package.md new file mode 100644 index 000000000..8bfedcfa4 --- /dev/null +++ b/src/doc/src/commands/cargo-package.md @@ -0,0 +1,3 @@ +# cargo package 
+{{#include command-common.html}} +{{#include ../../man/generated/cargo-package.html}} diff --git a/src/doc/src/commands/cargo-pkgid.md b/src/doc/src/commands/cargo-pkgid.md new file mode 100644 index 000000000..7481ad1c7 --- /dev/null +++ b/src/doc/src/commands/cargo-pkgid.md @@ -0,0 +1,3 @@ +# cargo pkgid +{{#include command-common.html}} +{{#include ../../man/generated/cargo-pkgid.html}} diff --git a/src/doc/src/commands/cargo-publish.md b/src/doc/src/commands/cargo-publish.md new file mode 100644 index 000000000..db45e7899 --- /dev/null +++ b/src/doc/src/commands/cargo-publish.md @@ -0,0 +1,3 @@ +# cargo publish +{{#include command-common.html}} +{{#include ../../man/generated/cargo-publish.html}} diff --git a/src/doc/src/commands/cargo-run.md b/src/doc/src/commands/cargo-run.md new file mode 100644 index 000000000..16d92815f --- /dev/null +++ b/src/doc/src/commands/cargo-run.md @@ -0,0 +1,3 @@ +# cargo run +{{#include command-common.html}} +{{#include ../../man/generated/cargo-run.html}} diff --git a/src/doc/src/commands/cargo-rustc.md b/src/doc/src/commands/cargo-rustc.md new file mode 100644 index 000000000..ad6b3d987 --- /dev/null +++ b/src/doc/src/commands/cargo-rustc.md @@ -0,0 +1,3 @@ +# cargo rustc +{{#include command-common.html}} +{{#include ../../man/generated/cargo-rustc.html}} diff --git a/src/doc/src/commands/cargo-rustdoc.md b/src/doc/src/commands/cargo-rustdoc.md new file mode 100644 index 000000000..a8ebf1950 --- /dev/null +++ b/src/doc/src/commands/cargo-rustdoc.md @@ -0,0 +1,3 @@ +# cargo rustdoc +{{#include command-common.html}} +{{#include ../../man/generated/cargo-rustdoc.html}} diff --git a/src/doc/src/commands/cargo-search.md b/src/doc/src/commands/cargo-search.md new file mode 100644 index 000000000..b872d9628 --- /dev/null +++ b/src/doc/src/commands/cargo-search.md @@ -0,0 +1,3 @@ +# cargo search +{{#include command-common.html}} +{{#include ../../man/generated/cargo-search.html}} diff --git a/src/doc/src/commands/cargo-test.md b/src/doc/src/commands/cargo-test.md new file mode 100644 index 000000000..52fc96912 --- /dev/null +++ b/src/doc/src/commands/cargo-test.md @@ -0,0 +1,3 @@ +# cargo test +{{#include command-common.html}} +{{#include ../../man/generated/cargo-test.html}} diff --git a/src/doc/src/commands/cargo-uninstall.md b/src/doc/src/commands/cargo-uninstall.md new file mode 100644 index 000000000..971ad3435 --- /dev/null +++ b/src/doc/src/commands/cargo-uninstall.md @@ -0,0 +1,3 @@ +# cargo uninstall +{{#include command-common.html}} +{{#include ../../man/generated/cargo-uninstall.html}} diff --git a/src/doc/src/commands/cargo-update.md b/src/doc/src/commands/cargo-update.md new file mode 100644 index 000000000..2be849863 --- /dev/null +++ b/src/doc/src/commands/cargo-update.md @@ -0,0 +1,3 @@ +# cargo update +{{#include command-common.html}} +{{#include ../../man/generated/cargo-update.html}} diff --git a/src/doc/src/commands/cargo-verify-project.md b/src/doc/src/commands/cargo-verify-project.md new file mode 100644 index 000000000..4a4c76682 --- /dev/null +++ b/src/doc/src/commands/cargo-verify-project.md @@ -0,0 +1,3 @@ +# cargo verify-project +{{#include command-common.html}} +{{#include ../../man/generated/cargo-verify-project.html}} diff --git a/src/doc/src/commands/cargo-version.md b/src/doc/src/commands/cargo-version.md new file mode 100644 index 000000000..12833ede4 --- /dev/null +++ b/src/doc/src/commands/cargo-version.md @@ -0,0 +1,3 @@ +# cargo version +{{#include command-common.html}} +{{#include ../../man/generated/cargo-version.html}} 
diff --git a/src/doc/src/commands/cargo-yank.md b/src/doc/src/commands/cargo-yank.md
new file mode 100644
index 000000000..d6ca7b3c6
--- /dev/null
+++ b/src/doc/src/commands/cargo-yank.md
@@ -0,0 +1,3 @@
+# cargo yank
+{{#include command-common.html}}
+{{#include ../../man/generated/cargo-yank.html}}
diff --git a/src/doc/src/commands/command-common.html b/src/doc/src/commands/command-common.html
new file mode 100644
index 000000000..93750600f
--- /dev/null
+++ b/src/doc/src/commands/command-common.html
@@ -0,0 +1,18 @@
+
diff --git a/src/doc/src/commands/general-commands.md b/src/doc/src/commands/general-commands.md
new file mode 100644
index 000000000..2fb4f7a97
--- /dev/null
+++ b/src/doc/src/commands/general-commands.md
@@ -0,0 +1 @@
+# General Commands
diff --git a/src/doc/src/commands/index.md b/src/doc/src/commands/index.md
new file mode 100644
index 000000000..a9ef0a2d2
--- /dev/null
+++ b/src/doc/src/commands/index.md
@@ -0,0 +1,3 @@
+# cargo
+{{#include command-common.html}}
+{{#include ../../man/generated/cargo.html}}
diff --git a/src/doc/src/commands/manifest-commands.md b/src/doc/src/commands/manifest-commands.md
new file mode 100644
index 000000000..cd803bdc3
--- /dev/null
+++ b/src/doc/src/commands/manifest-commands.md
@@ -0,0 +1 @@
+# Manifest Commands
diff --git a/src/doc/src/commands/package-commands.md b/src/doc/src/commands/package-commands.md
new file mode 100644
index 000000000..f528a2571
--- /dev/null
+++ b/src/doc/src/commands/package-commands.md
@@ -0,0 +1 @@
+# Package Commands
diff --git a/src/doc/src/commands/publishing-commands.md b/src/doc/src/commands/publishing-commands.md
new file mode 100644
index 000000000..e32bf3c16
--- /dev/null
+++ b/src/doc/src/commands/publishing-commands.md
@@ -0,0 +1 @@
+# Publishing Commands
diff --git a/src/doc/src/faq.md b/src/doc/src/faq.md
new file mode 100644
index 000000000..108dfc205
--- /dev/null
+++ b/src/doc/src/faq.md
@@ -0,0 +1,193 @@
+## Frequently Asked Questions
+
+### Is the plan to use GitHub as a package repository?
+
+No. The plan for Cargo is to use [crates.io], like npm or Rubygems do with
+npmjs.org and rubygems.org.
+
+We plan to support git repositories as a source of packages forever,
+because they can be used for early development and temporary patches,
+even when people use the registry as the primary source of packages.
+
+### Why build crates.io rather than use GitHub as a registry?
+
+We think that it’s very important to support multiple ways to download
+packages, including downloading from GitHub and copying packages into
+your package itself.
+
+That said, we think that [crates.io] offers a number of important benefits, and
+will likely become the primary way that people download packages in Cargo.
+
+For precedent, both Node.js’s [npm][1] and Ruby’s [bundler][2] support a
+central registry model as well as a Git-based model, and most packages
+are downloaded through the registry in those ecosystems, with an
+important minority of packages making use of git-based packages.
+
+[1]: https://www.npmjs.org
+[2]: https://bundler.io
+
+Some of the advantages that make a central registry popular in other
+languages include:
+
+* **Discoverability**. A central registry provides an easy place to look
+  for existing packages. Combined with tagging, this also makes it
+  possible for a registry to provide ecosystem-wide information, such as a
+  list of the most popular or most-depended-on packages.
+* **Speed**.
A central registry makes it possible to fetch just
+  the metadata for packages quickly and efficiently, and then to
+  download just the published package, without other bloat that
+  happens to exist in the repository. This adds up to a significant
+  improvement in the speed of dependency resolution and fetching. As
+  dependency graphs scale up, downloading all of the git repositories bogs
+  down fast. Also remember that not everybody has a high-speed,
+  low-latency Internet connection.
+
+### Will Cargo work with C code (or other languages)?
+
+Yes!
+
+Cargo handles compiling Rust code, but we know that many Rust packages
+link against C code. We also know that there are decades of tooling
+built up around compiling languages other than Rust.
+
+Our solution: Cargo allows a package to [specify a script](reference/build-scripts.html)
+(written in Rust) to run before invoking `rustc`. Rust is leveraged to
+implement platform-specific configuration and refactor out common build
+functionality among packages.
+
+### Can Cargo be used inside of `make` (or `ninja`, or ...)?
+
+Indeed. While we intend Cargo to be useful as a standalone way to
+compile Rust packages at the top-level, we know that some people will
+want to invoke Cargo from other build tools.
+
+We have designed Cargo to work well in those contexts, paying attention
+to things like error codes and machine-readable output modes. We still
+have some work to do on those fronts, but using Cargo in the context of
+conventional scripts is something we designed for from the beginning and
+will continue to prioritize.
+
+### Does Cargo handle multi-platform packages or cross-compilation?
+
+Rust itself provides facilities for configuring sections of code based
+on the platform. Cargo also supports [platform-specific
+dependencies][target-deps], and we plan to support more per-platform
+configuration in `Cargo.toml` in the future.
+
+[target-deps]: reference/specifying-dependencies.html#platform-specific-dependencies
+
+In the longer-term, we’re looking at ways to conveniently cross-compile
+packages using Cargo.
+
+### Does Cargo support environments, like `production` or `test`?
+
+We support environments through the use of [profiles][profile]:
+
+[profile]: reference/manifest.html#the-profile-sections
+
+* environment-specific flags (like `-g --opt-level=0` for development
+  and `--opt-level=3` for production).
+* environment-specific dependencies (like `hamcrest` for test assertions).
+* environment-specific `#[cfg]`
+* a `cargo test` command
+
+### Does Cargo work on Windows?
+
+Yes!
+
+All commits to Cargo are required to pass the local test suite on Windows.
+If, however, you find a Windows issue, we consider it a bug, so [please file an
+issue][3].
+
+[3]: https://github.com/rust-lang/cargo/issues
+
+### Why do binaries have `Cargo.lock` in version control, but not libraries?
+
+The purpose of a `Cargo.lock` is to describe the state of the world at the time
+of a successful build. It is then used to provide deterministic builds across
+whatever machine is building the package by ensuring that the exact same
+dependencies are being compiled.
+
+This property is most desirable for applications and packages which are at the
+very end of the dependency chain (binaries). As a result, it is recommended that
+all binaries check in their `Cargo.lock`.
+
+For libraries the situation is somewhat different. A library is not only used by
+the library developers, but also by any downstream consumers of the library.
Users
+dependent on the library will not inspect the library’s `Cargo.lock` (even if it
+exists). This is precisely because a library should **not** be deterministically
+recompiled for all users of the library.
+
+If a library ends up being used transitively by several dependencies, it’s
+likely that just a single copy of the library is desired (based on semver
+compatibility). If Cargo used all of the dependencies' `Cargo.lock` files,
+then multiple copies of the library could be used, and perhaps even a version
+conflict could arise.
+
+In other words, libraries specify semver requirements for their dependencies but
+cannot see the full picture. Only end products like binaries have a full
+picture to decide what versions of dependencies should be used.
+
+### Can libraries use `*` as a version for their dependencies?
+
+**As of January 22nd, 2016, [crates.io] rejects all packages (not just libraries)
+with wildcard dependency constraints.**
+
+While libraries _can_, strictly speaking, they should not. A version requirement
+of `*` says “This will work with every version ever,” which is never going
+to be true. Libraries should always specify the range that they do work with,
+even if it’s something as general as “every 1.x.y version.”
+
+### Why `Cargo.toml`?
+
+Since the manifest is one of the most frequent interactions with Cargo, the
+question of why the configuration file is named `Cargo.toml` arises from time
+to time. The leading capital-`C` was chosen to ensure that the manifest was
+grouped with other similar configuration files in directory listings. Sorting
+files often puts capital letters before lowercase letters, ensuring files like
+`Makefile` and `Cargo.toml` are placed together. The trailing `.toml` was
+chosen to emphasize the fact that the file is in the [TOML configuration
+format](https://github.com/toml-lang/toml).
+
+Cargo does not allow other names such as `cargo.toml` or `Cargofile`; requiring
+a single name makes it easy to identify a Cargo repository. An option of
+many possible names has historically led to confusion where one case was handled
+but others were accidentally forgotten.
+
+[crates.io]: https://crates.io/
+
+### How can Cargo work offline?
+
+Cargo is often used in situations with limited or no network access such as
+airplanes, CI environments, or embedded in large production deployments. Users
+are often surprised when Cargo attempts to fetch resources from the network, and
+hence the request for Cargo to work offline comes up frequently.
+
+Cargo, at its heart, will not attempt to access the network unless told to do
+so. That is, if no crates come from crates.io, a git repository, or some other
+network location, Cargo will never attempt to make a network connection. As a
+result, if Cargo attempts to touch the network, then it's because it needs to
+fetch a required resource.
+
+Cargo is also quite aggressive about caching information to minimize the amount
+of network activity. It guarantees, for example, that if `cargo build` (or
+an equivalent) is run to completion, then the next `cargo build` will not
+touch the network so long as `Cargo.toml` has not been modified in the
+meantime. This avoidance of the network boils down to a `Cargo.lock` existing
+and a populated cache of the crates reflected in the lock file. If either of
+these components is missing, then it's required for the build to succeed and
+must be fetched remotely.
+
+As of Rust 1.11.0, Cargo understands a new flag, `--frozen`, which is an
+assertion that it shouldn't touch the network.
When passed, Cargo will
+immediately return an error if it would otherwise attempt a network request.
+The error should include contextual information about why the network request
+is being made in the first place, to help with debugging. Note that this flag
+*does not change the behavior of Cargo*; it simply asserts that Cargo shouldn't
+touch the network, as a previous command has been run to ensure that network
+activity shouldn't be necessary.
+
+For more information about vendoring, see documentation on [source
+replacement][replace].
+
+[replace]: reference/source-replacement.html
diff --git a/src/doc/src/getting-started/first-steps.md b/src/doc/src/getting-started/first-steps.md
new file mode 100644
index 000000000..0a3b730dc
--- /dev/null
+++ b/src/doc/src/getting-started/first-steps.md
@@ -0,0 +1,73 @@
+## First Steps with Cargo
+
+To start a new package with Cargo, use `cargo new`:
+
+```console
+$ cargo new hello_world
+```
+
+Cargo defaults to `--bin` to make a binary program. To make a library, we'd
+pass `--lib`.
+
+Let’s check out what Cargo has generated for us:
+
+```console
+$ cd hello_world
+$ tree .
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+1 directory, 2 files
+```
+
+This is all we need to get started. First, let’s check out `Cargo.toml`:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+edition = "2018"
+
+[dependencies]
+```
+
+This is called a **manifest**, and it contains all of the metadata that Cargo
+needs to compile your package.
+
+Here’s what’s in `src/main.rs`:
+
+```rust
+fn main() {
+    println!("Hello, world!");
+}
+```
+
+Cargo generated a “hello world” for us. Let’s compile it:
+
+```console
+$ cargo build
+   Compiling hello_world v0.1.0 (file:///path/to/package/hello_world)
+```
+
+And then run it:
+
+```console
+$ ./target/debug/hello_world
+Hello, world!
+```
+
+We can also use `cargo run` to compile and then run it, all in one step:
+
+```console
+$ cargo run
+     Fresh hello_world v0.1.0 (file:///path/to/package/hello_world)
+   Running `target/debug/hello_world`
+Hello, world!
+```
+
+### Going further
+
+For more details on using Cargo, check out the [Cargo Guide](guide/index.html).
diff --git a/src/doc/src/getting-started/index.md b/src/doc/src/getting-started/index.md
new file mode 100644
index 000000000..22a7315cf
--- /dev/null
+++ b/src/doc/src/getting-started/index.md
@@ -0,0 +1,6 @@
+## Getting Started
+
+To get started with Cargo, install Cargo (and Rust) and set up your first crate.
+
+* [Installation](getting-started/installation.html)
+* [First steps with Cargo](getting-started/first-steps.html)
diff --git a/src/doc/src/getting-started/installation.md b/src/doc/src/getting-started/installation.md
new file mode 100644
index 000000000..d7e55db19
--- /dev/null
+++ b/src/doc/src/getting-started/installation.md
@@ -0,0 +1,37 @@
+## Installation
+
+### Install Rust and Cargo
+
+The easiest way to get Cargo is to install the current stable release of [Rust]
+by using `rustup`. Installing Rust using `rustup` will also install `cargo`.
+
+On Linux and macOS systems, this is done as follows:
+
+```console
+$ curl https://sh.rustup.rs -sSf | sh
+```
+
+It will download a script and start the installation. If everything goes well,
+you’ll see this appear:
+
+```console
+Rust is installed now. Great!
+```
+
+On Windows, download and run [rustup-init.exe]. It will start the installation
+in a console and present the above message on success.
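+
+As a quick sanity check (assuming the installer put `cargo` on your `PATH`;
+the exact version printed will vary), you can ask Cargo to report its version:
+
+```console
+$ cargo --version
+```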
+
+After this, you can use the `rustup` command to also install `beta` or `nightly`
+channels for Rust and Cargo.
+
+For other installation options and information, visit the
+[install][install-rust] page of the Rust website.
+
+### Build and Install Cargo from Source
+
+Alternatively, you can [build Cargo from source][compiling-from-source].
+
+[rust]: https://www.rust-lang.org/
+[rustup-init.exe]: https://win.rustup.rs/
+[install-rust]: https://www.rust-lang.org/tools/install
+[compiling-from-source]: https://github.com/rust-lang/cargo#compiling-from-source
diff --git a/src/doc/src/guide/build-cache.md b/src/doc/src/guide/build-cache.md
new file mode 100644
index 000000000..d253b8acc
--- /dev/null
+++ b/src/doc/src/guide/build-cache.md
@@ -0,0 +1,14 @@
+## Build cache
+
+Cargo shares build artifacts among all the packages of a single workspace.
+Today, Cargo does not share build results across different workspaces, but
+a similar result can be achieved by using a third-party tool, [sccache].
+
+To set up `sccache`, install it with `cargo install sccache` and set the
+`RUSTC_WRAPPER` environment variable to `sccache` before invoking Cargo.
+If you use bash, it makes sense to add `export RUSTC_WRAPPER=sccache` to
+`.bashrc`. Refer to the sccache documentation for more details.
+
+[sccache]: https://github.com/mozilla/sccache
+
+
diff --git a/src/doc/src/guide/cargo-toml-vs-cargo-lock.md b/src/doc/src/guide/cargo-toml-vs-cargo-lock.md
new file mode 100644
index 000000000..15cee96cf
--- /dev/null
+++ b/src/doc/src/guide/cargo-toml-vs-cargo-lock.md
@@ -0,0 +1,103 @@
+## Cargo.toml vs Cargo.lock
+
+`Cargo.toml` and `Cargo.lock` serve two different purposes. Before we talk
+about them, here’s a summary:
+
+* `Cargo.toml` is about describing your dependencies in a broad sense, and is
+  written by you.
+* `Cargo.lock` contains exact information about your dependencies. It is
+  maintained by Cargo and should not be manually edited.
+
+If you’re building a non-end product, such as a Rust library that other Rust
+packages will depend on, put `Cargo.lock` in your `.gitignore`. If you’re
+building an end product (an executable such as a command-line tool or an
+application, or a system library with a crate-type of `staticlib` or `cdylib`),
+check `Cargo.lock` into `git`. If you're curious about why that is, see
+["Why do binaries have `Cargo.lock` in version control, but not libraries?" in the
+FAQ](faq.html#why-do-binaries-have-cargolock-in-version-control-but-not-libraries).
+
+Let’s dig in a little bit more.
+
+`Cargo.toml` is a **manifest** file in which we can specify a bunch of
+different metadata about our package. For example, we can say that we depend
+on another package:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git" }
+```
+
+This package has a single dependency, on the `rand` library. We’ve stated in
+this case that we’re relying on a particular Git repository that lives on
+GitHub. Since we haven’t specified any other information, Cargo assumes that
+we intend to use the latest commit on the `master` branch to build our package.
+
+Sound good? Well, there’s one problem: if you build this package today, and
+then you send a copy to me, and I build this package tomorrow, something bad
+could happen. There could be more commits to `rand` in the meantime, and my
+build would include new commits while yours would not. Therefore, we would
+get different builds.
This would be bad because we want reproducible builds.
+
+We could fix this problem by putting a `rev` line in our `Cargo.toml`:
+
+```toml
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git", rev = "9f35b8e" }
+```
+
+Now our builds will be the same. But there’s a big drawback: now we have to
+manually think about SHA-1s every time we want to update our library. This is
+both tedious and error-prone.
+
+Enter the `Cargo.lock`. Because of its existence, we don’t need to manually
+keep track of the exact revisions: Cargo will do it for us. When we have a
+manifest like this:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git" }
+```
+
+Cargo will take the latest commit and write that information out into our
+`Cargo.lock` when we build for the first time. That file will look like this:
+
+```toml
+[[package]]
+name = "hello_world"
+version = "0.1.0"
+dependencies = [
+ "rand 0.1.0 (git+https://github.com/rust-lang-nursery/rand.git#9f35b8e439eeedd60b9414c58f389bdc6a3284f9)",
+]
+
+[[package]]
+name = "rand"
+version = "0.1.0"
+source = "git+https://github.com/rust-lang-nursery/rand.git#9f35b8e439eeedd60b9414c58f389bdc6a3284f9"
+```
+
+You can see that there’s a lot more information here, including the exact
+revision we used to build. Now when you give your package to someone else,
+they’ll use the exact same SHA, even though we didn’t specify it in our
+`Cargo.toml`.
+
+When we’re ready to opt in to a new version of the library, Cargo can
+re-calculate the dependencies and update things for us:
+
+```console
+$ cargo update           # updates all dependencies
+$ cargo update -p rand   # updates just “rand”
+```
+
+This will write out a new `Cargo.lock` with the new version information. Note
+that the argument to `cargo update` is actually a
+[Package ID Specification](reference/pkgid-spec.html) and `rand` is just a short
+specification.
diff --git a/src/doc/src/guide/continuous-integration.md b/src/doc/src/guide/continuous-integration.md
new file mode 100644
index 000000000..25d2e3851
--- /dev/null
+++ b/src/doc/src/guide/continuous-integration.md
@@ -0,0 +1,88 @@
+## Continuous Integration
+
+### Travis CI
+
+To test your package on Travis CI, here is a sample `.travis.yml` file:
+
+```yaml
+language: rust
+rust:
+  - stable
+  - beta
+  - nightly
+matrix:
+  allow_failures:
+    - rust: nightly
+```
+
+This will test all three release channels, but any breakage in nightly
+will not fail your overall build. Please see the [Travis CI Rust
+documentation](https://docs.travis-ci.com/user/languages/rust/) for more
+information.
+
+### GitLab CI
+
+To test your package on GitLab CI, here is a sample `.gitlab-ci.yml` file:
+
+```yaml
+stages:
+  - build
+
+rust-latest:
+  stage: build
+  image: rust:latest
+  script:
+    - cargo build --verbose
+    - cargo test --verbose
+
+rust-nightly:
+  stage: build
+  image: rustlang/rust:nightly
+  script:
+    - cargo build --verbose
+    - cargo test --verbose
+  allow_failure: true
+```
+
+This will test on the stable and nightly channels, but any
+breakage in nightly will not fail your overall build. Please see the
+[GitLab CI documentation](https://docs.gitlab.com/ce/ci/yaml/README.html) for more
+information.
+
+### builds.sr.ht
+
+To test your package on sr.ht, here is a sample `.build.yml` file.
+Be sure to change `<your repo>` and `<your project>` to the repo to clone and
+the directory where it was cloned.
+
+```yaml
+image: archlinux
+packages:
+  - rustup
+sources:
+  - <your repo>
+tasks:
+  - setup: |
+      rustup toolchain install nightly stable
+      cd <your project>/
+      rustup run stable cargo fetch
+  - stable: |
+      rustup default stable
+      cd <your project>/
+      cargo build --verbose
+      cargo test --verbose
+  - nightly: |
+      rustup default nightly
+      cd <your project>/
+      cargo build --verbose ||:
+      cargo test --verbose ||:
+  - docs: |
+      cd <your project>/
+      rustup run stable cargo doc --no-deps
+      rustup run nightly cargo doc --no-deps ||:
+```
+
+This will test and build documentation on the stable and nightly channels,
+but any breakage in nightly will not fail your overall build. Please
+see the [builds.sr.ht documentation](https://man.sr.ht/builds.sr.ht/) for more
+information.
diff --git a/src/doc/src/guide/creating-a-new-project.md b/src/doc/src/guide/creating-a-new-project.md
new file mode 100644
index 000000000..92a97a2d6
--- /dev/null
+++ b/src/doc/src/guide/creating-a-new-project.md
@@ -0,0 +1,91 @@
+## Creating a New Package
+
+To start a new package with Cargo, use `cargo new`:
+
+```console
+$ cargo new hello_world --bin
+```
+
+We’re passing `--bin` because we’re making a binary program: if we
+were making a library, we’d pass `--lib`. This also initializes a new `git`
+repository by default. If you don't want it to do that, pass `--vcs none`.
+
+Let’s check out what Cargo has generated for us:
+
+```console
+$ cd hello_world
+$ tree .
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+1 directory, 2 files
+```
+
+Let’s take a closer look at `Cargo.toml`:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+edition = "2018"
+
+[dependencies]
+
+```
+
+This is called a **manifest**, and it contains all of the metadata that Cargo
+needs to compile your package.
+
+Here’s what’s in `src/main.rs`:
+
+```rust
+fn main() {
+    println!("Hello, world!");
+}
+```
+
+Cargo generated a “hello world” for us. Let’s compile it:
+
+```console
+$ cargo build
+   Compiling hello_world v0.1.0 (file:///path/to/package/hello_world)
+```
+
+And then run it:
+
+```console
+$ ./target/debug/hello_world
+Hello, world!
+```
+
+We can also use `cargo run` to compile and then run it, all in one step (you
+won't see the `Compiling` line if you have not made any changes since you last
+compiled):
+
+```console
+$ cargo run
+   Compiling hello_world v0.1.0 (file:///path/to/package/hello_world)
+     Running `target/debug/hello_world`
+Hello, world!
+```
+
+You’ll now notice a new file, `Cargo.lock`. It contains information about our
+dependencies. Since we don’t have any yet, it’s not very interesting.
+
+Once you’re ready for release, you can use `cargo build --release` to compile
+your files with optimizations turned on:
+
+```console
+$ cargo build --release
+   Compiling hello_world v0.1.0 (file:///path/to/package/hello_world)
+```
+
+`cargo build --release` puts the resulting binary in `target/release` instead of
+`target/debug`.
+
+Compiling in debug mode is the default for development: compilation time is
+shorter since the compiler doesn't do optimizations, but the code will run
+slower. Release mode takes longer to compile, but the code will run faster.
diff --git a/src/doc/src/guide/dependencies.md b/src/doc/src/guide/dependencies.md
new file mode 100644
index 000000000..80167ebe1
--- /dev/null
+++ b/src/doc/src/guide/dependencies.md
@@ -0,0 +1,90 @@
+## Dependencies
+
+[crates.io] is the Rust community's central package registry that serves as a
+location to discover and download packages.
`cargo` is configured to use it by
+default to find requested packages.
+
+To depend on a library hosted on [crates.io], add it to your `Cargo.toml`.
+
+[crates.io]: https://crates.io/
+
+### Adding a dependency
+
+If your `Cargo.toml` doesn't already have a `[dependencies]` section, add that,
+then list the crate name and version that you would like to use. This example
+adds a dependency on the `time` crate:
+
+```toml
+[dependencies]
+time = "0.1.12"
+```
+
+The version string is a [semver] version requirement. The [specifying
+dependencies](reference/specifying-dependencies.html) docs have more information about
+the options you have here.
+
+[semver]: https://github.com/steveklabnik/semver#requirements
+
+If we also wanted to add a dependency on the `regex` crate, we would not need
+to add `[dependencies]` for each crate listed. Here's what your whole
+`Cargo.toml` file would look like with dependencies on the `time` and `regex`
+crates:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+time = "0.1.12"
+regex = "0.1.41"
+```
+
+Re-run `cargo build`, and Cargo will fetch the new dependencies and all of
+their dependencies, compile them all, and update the `Cargo.lock`:
+
+```console
+$ cargo build
+      Updating crates.io index
+   Downloading memchr v0.1.5
+   Downloading libc v0.1.10
+   Downloading regex-syntax v0.2.1
+   Downloading memchr v0.1.5
+   Downloading aho-corasick v0.3.0
+   Downloading regex v0.1.41
+     Compiling memchr v0.1.5
+     Compiling libc v0.1.10
+     Compiling regex-syntax v0.2.1
+     Compiling memchr v0.1.5
+     Compiling aho-corasick v0.3.0
+     Compiling regex v0.1.41
+     Compiling hello_world v0.1.0 (file:///path/to/package/hello_world)
+```
+
+Our `Cargo.lock` contains the exact information about which revision of all of
+these dependencies we used.
+
+Now, if `regex` gets updated, we will still build with the same revision until
+we choose to `cargo update`.
+
+You can now use the `regex` library via `extern crate` in `main.rs`.
+
+```rust
+extern crate regex;
+
+use regex::Regex;
+
+fn main() {
+    let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
+    println!("Did our date match? {}", re.is_match("2014-01-01"));
+}
+```
+
+Running it will show:
+
+```console
+$ cargo run
+   Running `target/debug/hello_world`
+Did our date match? true
+```
diff --git a/src/doc/src/guide/index.md b/src/doc/src/guide/index.md
new file mode 100644
index 000000000..08fb1c6d9
--- /dev/null
+++ b/src/doc/src/guide/index.md
@@ -0,0 +1,14 @@
+## Cargo Guide
+
+This guide will give you all that you need to know about how to use Cargo to
+develop Rust packages.
+
+* [Why Cargo Exists](guide/why-cargo-exists.html)
+* [Creating a New Package](guide/creating-a-new-project.html)
+* [Working on an Existing Cargo Package](guide/working-on-an-existing-project.html)
+* [Dependencies](guide/dependencies.html)
+* [Package Layout](guide/project-layout.html)
+* [Cargo.toml vs Cargo.lock](guide/cargo-toml-vs-cargo-lock.html)
+* [Tests](guide/tests.html)
+* [Continuous Integration](guide/continuous-integration.html)
+* [Build Cache](guide/build-cache.html)
diff --git a/src/doc/src/guide/project-layout.md b/src/doc/src/guide/project-layout.md
new file mode 100644
index 000000000..516f06593
--- /dev/null
+++ b/src/doc/src/guide/project-layout.md
@@ -0,0 +1,35 @@
+## Package Layout
+
+Cargo uses conventions for file placement to make it easy to dive into a new
+Cargo package:
+
+```
+.
+├── Cargo.lock +├── Cargo.toml +├── benches +│   └── large-input.rs +├── examples +│   └── simple.rs +├── src +│   ├── bin +│   │   └── another_executable.rs +│   ├── lib.rs +│   └── main.rs +└── tests + └── some-integration-tests.rs +``` + +* `Cargo.toml` and `Cargo.lock` are stored in the root of your package (*package + root*). +* Source code goes in the `src` directory. +* The default library file is `src/lib.rs`. +* The default executable file is `src/main.rs`. +* Other executables can be placed in `src/bin/*.rs`. +* Integration tests go in the `tests` directory (unit tests go in each file + they're testing). +* Examples go in the `examples` directory. +* Benchmarks go in the `benches` directory. + +These are explained in more detail in the [manifest +description](reference/manifest.html#the-project-layout). diff --git a/src/doc/src/guide/tests.md b/src/doc/src/guide/tests.md new file mode 100644 index 000000000..3ffa4af7a --- /dev/null +++ b/src/doc/src/guide/tests.md @@ -0,0 +1,39 @@ +## Tests + +Cargo can run your tests with the `cargo test` command. Cargo looks for tests +to run in two places: in each of your `src` files and any tests in `tests/`. +Tests in your `src` files should be unit tests, and tests in `tests/` should be +integration-style tests. As such, you’ll need to import your crates into +the files in `tests`. + +Here's an example of running `cargo test` in our package, which currently has +no tests: + +```console +$ cargo test + Compiling rand v0.1.0 (https://github.com/rust-lang-nursery/rand.git#9f35b8e) + Compiling hello_world v0.1.0 (file:///path/to/package/hello_world) + Running target/test/hello_world-9c2b65bbb79eabce + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out +``` + +If our package had tests, we would see more output with the correct number of +tests. + +You can also run a specific test by passing a filter: + +```console +$ cargo test foo +``` + +This will run any test with `foo` in its name. + +`cargo test` runs additional checks as well. For example, it will compile any +examples you’ve included and will also test the examples in your +documentation. Please see the [testing guide][testing] in the Rust +documentation for more details. + +[testing]: https://doc.rust-lang.org/book/testing.html diff --git a/src/doc/src/guide/why-cargo-exists.md b/src/doc/src/guide/why-cargo-exists.md new file mode 100644 index 000000000..215bbceeb --- /dev/null +++ b/src/doc/src/guide/why-cargo-exists.md @@ -0,0 +1,12 @@ +## Why Cargo Exists + +Cargo is a tool that allows Rust packages to declare their various +dependencies and ensure that you’ll always get a repeatable build. + +To accomplish this goal, Cargo does four things: + +* Introduces two metadata files with various bits of package information. +* Fetches and builds your package’s dependencies. +* Invokes `rustc` or another build tool with the correct parameters to build + your package. +* Introduces conventions to make working with Rust packages easier. diff --git a/src/doc/src/guide/working-on-an-existing-project.md b/src/doc/src/guide/working-on-an-existing-project.md new file mode 100644 index 000000000..ff5a31f2d --- /dev/null +++ b/src/doc/src/guide/working-on-an-existing-project.md @@ -0,0 +1,22 @@ +## Working on an Existing Cargo Package + +If you download an existing package that uses Cargo, it’s really easy +to get going. + +First, get the package from somewhere. 
In this example, we’ll use `rand`
+cloned from its repository on GitHub:
+
+```console
+$ git clone https://github.com/rust-lang-nursery/rand.git
+$ cd rand
+```
+
+To build, use `cargo build`:
+
+```console
+$ cargo build
+   Compiling rand v0.1.0 (file:///path/to/package/rand)
+```
+
+This will fetch all of the dependencies and then build them, along with the
+package.
diff --git a/src/doc/src/images/Cargo-Logo-Small.png b/src/doc/src/images/Cargo-Logo-Small.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3a99208c287bc3fba67de085255df1b4ad22c0a
GIT binary patch
literal 58168
[base85-encoded binary data for the Cargo logo PNG omitted]
zVFIV(o_5;MI~kw1a^XWx5QEM_98Ch8?gyf@g@kH?dUke+1XQ+r(X%Tr@#;^ z-5ROB!13`hcHc~JK5e5rxPrBfEew`7;5!>|#X3UDadP+)JHP$Q^Thq1L4EQG*1C(t zeGh9ZJ!DxXz*X1m(`@A)34ju71>EvnxNPbr5iFc+3cxk0mX0|uJ3YyO8K4V`2E0Ge z6{rr&_nalW>Kc&l19{ChE&x^$gASm1#`y-D&HL>PYyDh5mmnec3r@VRhc~)1De3-= z4wielambzW{3d0@mpfxokDa$SiN62ZAcIb+B8QCEV-zj;9jvM?_c zeTDHJ;`A7?8X-v6NMM-mG&lEl>-f9_GTU1ytaN!uj?N;^h5>LKXU0${0I8B+jCyv4 zYJ7-txPyAQ2Ui|osTgB(Ibo$6M48~|BI2~{V~SPrzILDg3eP|OvpC=R5xVIJTWc+> zul6O`KdTa|I*_QSWqp8vqg?^W9H5;AHZ&s`aTqT{Jp*8>x?x+2fM)9u&uK!^5V+42 z06?^`6+C&xYl!sDn(-Al!HA4fsMsVP-0YyIqK9yaxwEwv9^UGTR{zb-4!Uh;CZDUk zk9cD{2Q%^J_D?I>R^Fe&y~QFvVltOU`bt*x(C%MBDpv7g zZ;0>z@E7stKmAcT8y?K=-CBzuJiOMrxfVLTc#vn)mq_DRNV7wv@=PcjF-Y!<95*P} zEIN2+xoxo}8c>?PGfOLc-3X zKEy_^5{^@=)58~Ee1R{1wwV0(HMZ8exPE;LgQaB&8-kKZ+lBDc?2JO~#q@q*lm~KN zXdlec*rB{eD~xLzDm>klb|a)n0L-*E_IzB+;E{u~3Jwr4#W?^WdV zA3VI8J>FjFtapa3R{6@u;R}Sx9_on*2_V%oAO({)q35bViNx>uPR8f$UI_p_}0ItFKmo@!J3&{Ez3cT`y4_HR_yYrS6uf47M9*?_sy(^?u5mnTr|GZsE?~#Xi zS-FD_YME~@a%(dgK(f&1&$JkDeWQ&J@Ac(e)X*wDqPhbCXc6+N6-+!>#y0huqeH>+&yhJ^Gg?e&` zFgpioDJ+pfV=A_KojRQ!+BOH+Hn4Y-#@|9aKR?yh9!fE9E<5x)5EX?GICO*IRU}a~ zQ5G5q2r&rjT4q&FPZ6d^s742pPpgKzLbmm3hVEbut^O7&*T=;qVO#<+>!N5cNs%Bq zky{3}r93pZr7}RAJPzn&Bansfz=VJN#)Ljiy(kXQ)iq!%ynGHBjymN=vCL-1(NUj2 zSuaes*}Sfad=qxjt0ZiQCH10iZnils9f0J{I?`H%+yi^ZGFfS%d?5h$%dbSnM`f{r zc1u7*s+LWe9(uaucA5N@@xmO~QNU5jK$cNmR#k4fEt+)%+M>uXtus7-Q{dOHd-coV z)mi__gZjz6jk)(VXm9q%MN#g!G+9(0cM+z?(w%}(0tiq~9kLM1cwNzcCpN)zNtf`S zPq^Iw{`{Z+3+VOwq!u74I037DkCmQfygRbdxi%^&s%aks5G%xLhPoUHu*LFBz^pub ziu(L($zgOm1^O$S(h@bTTNq9wLMl+S`eHs44?)2cZ~*d*+Gbh^P||3lcp=7xy_cM* zlXxtel0KB{VQ2_MzEAQi&(X~Jh%xjepfav$JYWqf0$ZF2a^tDo6v>BqHtLJ?^|^A& zzSx<{NVhqtNpGfx!vZ3YZ}#y1oj#U&nMvY15DXY{6HwXQZSWh89+?S^88+Jck|}*A z!Q=44>O-C7T=9*(j_kLUNj}8&uTd_wN z-W$Vx6*$m^oIUTHx#r7eo*S3bQnK>Xk>mS0`ius^60(%51xYqT^KJmAAR!8kt4mxlR;NjB@2binigLuK_0iH3a5+x^R*bKZn;L*Cnl7c4`v{hz+j?TnuUQ zipMXN$7BT*VZBGQBx+hW0kfd!yD`4WJC4@R4hFYYlV>Oh-Y&Cq1>FG1f=AafQ`@-VyEv=*IU%@nXF)lqK zS^{XTP9M$}5HQftHuI`L3k$gPToMB1t^qqTPV;?Qwe@Y(DOXt-^)3_i{@CPw0+G`( z7svO-qRH9GqH7goBv>(|Yb+*7PRCRGTpj8FX(_sMwIlyeIxWd18ikJ~Rk2tU1Dwe2lC%iylgmX=;FZZ5whf!|`b0edXzs{k8hcZI@SX-B0g5 z*eY(Xht|LyWKJrNuMwx>eF^U?#abnqT*K4jBrJxsV4IiyK41|jMylNMxXWjr8<*3- zFv@sm8%sbd6YdiZ(bCDIw+kND%4&XA#d_+zQx0DDHQ~}TkzJO9p z>-NI?YHhU7@?ydJ8ZW%BeN^K&l8z9Wo)iEJlDf_6DfB(d zZL^Z$nP-tfizi^(z`qq+mBOg5JU4fV42GP2H!u&!Ov zhc9}<#1=-j%jf0+0w5Fb8hW}kLoC%x8!gDT&oMpz3TeEH!p+d@uOREJqjCd`t3q0u ze9=dDuqu6W%aT2plaPhJsXP$DIS|9 z_8Cr6*PjbNC?)eArN3cSX>G{f41L+JNe>8#Y8yo^-v@(kh9|dsSm_r+ZoPk_D;i{X zt_kN%82sI~C$CM6iTs#YZzUKw=C&B>n(5wPhz>|1A6j{IIGtqpY;TeKTCRuLow@h* zaGv|x%(pjUtCtSEn=TUfuMnn(qV^ifF;Wok%gFpN%x)^mQoPZEsD3xyumX?JB9Ffe zQ^w!5D3P(QVUP)vkSuRZrJD_xO8%vQljlFg*^9p>V3zl`;M?nS8z2)v>mrHw6nS1d zT8-BbMU6ws>GfpMLGVhvwCemB((Da7?EIzbLn{$?`v0tG~;ZDP|hIQ z{GQh&YujldAu|_qpLCdW6X;T;hwdE+exCOZa{N0&?AvJ)3Q$9gpw;lwSoI@2UCp5? 
zrx@@2MU3};jM3|#;^N?QOwV7V-M@;Ww*}W;hVwaphJ$g%KtNH8Kng&dNr6Dfw&Q)2 z=Pyv6evNSP5``P1-Ra?%pM2dvKRXo?0QmDJsjG~3jS?AdQUfbN!B2*oRFgNDp6`lg z+xpFqu(AF7=wAP0WXq3G`&IE6c)d#E1n(Wgpd@Prf0Li_&QZD63|Hg>9BrkN0IA#j z5vtSL>L_@Sv*DI&d>&u(4X$h<0)QhuB+G%llUlk#UK}BPcDNc&({{CW^Fe%kZ)@&- zg<{|ieHsaPAx@8w$_r)POZX12$jIw$RzA_V4(`l6)ObA4#YCR$GSb&n<8|+NWN)jn z5@#!v^u8!UDBhapB=`_(rBkF-95RgdeuUZXU&i>=_b@(vhT~ViM3@aF3l~ca{)q5!Dklj#!?fo*rSgP~$jzf%0M(w>C1jzS0-nvy}A!fF2l?T$#^8c|-YG$Zo zj!u6AsaQ?L(#`tby*q9`&Aiy`UgRz5d3=R5-A9-mBUM8{SocDk07=f!P|sdtW-IL^ z>GGQ3EI&=wWZo%%jq1B>QIa*yXYNw^EB>-OWaK>A)*G10AEyJ!F)_k=PDI8Cs1AE3 ziQ0=V{|Ahp|Iax8^1nfxov8p`wA2WwN0`9#>h-``aXqAfSXT(skrW8Zx%Jx-8|!OW zzw{ToXrG9Uc=@D6*O%R$~F+YI=ckvdGlSxZR%9N%aOgki`BUgfph^HOdd^*J$JfIKx82y%W{m5 zzXFcFhb$}5%qLlo5MbChn${?*03g|(Nf$*!DpZwv6BT?o!`gBOo7>+)T>B#&`;Tx^ zZ=s3>JtDn+T^e~U9up9Q0GjAk#d?Ce9HXwraEOQiJxb4H4MT6XB{9!qWpSJs0g##W z2C3F8%nCGfNA`i|xnTE`LSgrtHZJnV#E^;JeTI`^!lyeGe)&AdNxX^Wtq0iLyaFGm zs76O3l~a$^`>IExv7fZ(Q9a0p3I=;IZAG%VFrPP^L{VsoG2!rZM!H3l>&D!gYoH@5 zue|S<&OA3RsTq%i#++F+00!___9*f_2a-eqF?uPe^gU{bX^*0I1elOZ?A1{%+VKp> z^x9%HQ=AZMV6ul+xr5F28{EH^KqHK-BIWIp#f zlPu3Nnfi_@db~WW@ylm5zC7%sz4io8-v0csfH|md z@~rFvIi4!K)b$^Z^><-d3y^*GVvMsVe*w2nG0fBWj{r8Ccs%V{6n+rqT0RM)E!;_13U_%vP{6;z|{<3L-IU-=MQiU++R zsOdgYXK>ERFYt6G=T^{^wVsnzbEukaJsUx!Tqb*1aFqJ-6|VJ<@#tn7x3}-3xbg=$ zZT&tD>upTZK$_+qI2B2Vy?F~+UH~qK_c`FanWoQ>m|-}9iHZFX&;&Yi!OVSa=-BIq z_at-jJfsaUv?NHudU)&v@a&Cj0+}>zBf#V3QH`H}UE%YCCFCoQ@!p5u!3U3SV6#7k zo4%0of%R}#+JUF zFpPdmC#xZnFv{>$e~H*7U_)9eYUr4LTwl+y+!JD@{FPNN#$F%jVgvv|L$xG2Y0@!( zdVGjDdW}}KgH^wWdsj-_-`>FL+)FE${|Ni>3C_Z%sMh){6C!UWn(#f*gLiowS=NHf z1ki*uO8Fpyu}V6#5(+4xSYeGnK=3xi*u*>$pZ-SfMnOxflMSzmxiSl8tUkiDSE)u> zCMqPKye#pvuRV^c8@PJsTXXO0iKxqV;~|20Uppdq9%m;)i;<9W2HUK{_}pZeLQGtZ4nX#yIKjVGp0(KKQc*bIxCfX47sUMJfM1aJt5yvJ}=feiwX zk*XR}am*ZWV?9H!rBc!SEJ!}e3d9@6zIbhtdn*y^A!OLZcn^N|3jOd3x0X)v@J0)_ zwjUth{6if2@8V#xfobK@9$XR9ZPs2w-daM|>I=Ae-;&QE@Dhv|>Jq5Dvy=;ZVNGcu zR@u}5OX!5nk$v-G<*a!=0z~d*<|qORnf3USFG_s6zXZSZ7*9UM_&0dN~ z=A4wQIj{?%r?@0}OIB|A$-F*RQAxsO`apAV?Aas@M8H^nH=pfJ@x{&rlbPIa8A99_ z5L-0M_weoeeSCDUhk2Wm2zWZkBHr2IS&4SxP3MLELqQb}egjBIdf6;r{0Enx+0QV= zhJTG|s&=xF4G$f)=;1yZA@ zg@@)nb61{~RAzYq@2F6#PqnOk(}^Ovn8zSA6jVvt<)_Jm-v~FU6NbinnTtpnFsui7 zJ-&)l{|FApxW0Xkt&Jt19>Rr1?&};8i!gnxC|bkGgCF6%X6L|=j0=afXkHZFDEKP( zEjGb81cKmb7A2(+A`q0Cvfj(3@N;xl;{NrX%;SXj$bE*@L5`K4!`?~2i-U=_ZF#9` ze)(#OTbnI(+dwdL7Z&V8^i0yN!r0!|r$;KR^BeKfm}BOqTy9Y(M-K-n)Ml*Ow>AXFCYP z7l@O6$uXvSsMLw5VCp2Fcydv(lOTb7H_)qsY|B1|+LnyUYm{T;+>%BZO@k=de!V+I z6hLx#aHES4?hY2SHf6@c^($>8If0XKQjROU%<9}0Z8iXq&{SlN+O3S_SJQVX#9{K~ zWd9{JZd^_Sz+6^@r3WO^2>8&Usb?=C6xfm@1|`eViz8U^wS!ng3{j~S7_=iY5G+k@ z%nIZgu9m610(Mfilk0_qeX-W*=V#K_>1TWB#XVf_pW@#780+n0oa}#z&p!VW&v%EI z#wAhpD6$0a^^ZX!s5rpri&@b|hBopx+hQnzZMbBn2%w}}dSYg2YJ(|tBF7|j@!9Du z{P5^^@kRMZSi1H*`0j^y@q6#B;La7G2#2UIUP|aF!#b4on_{%e9Xh@0>FXiRM-_4^ zNFafWrGPkUlhJ#(*9^z-&pDMgPblY>dEn8_E^b_D$#vY`YR|Vja{Spvjjvx%@Y|gU zKHnKj#S#fJ@3Y9H(H-8)G%2l&GYmGrBh3QDr8Ccs%V`N5Zw9CN9IG|o?7@spA}s)h zqmE^>1Sb0Bc&%(IFp*Oxr-=e!lnaXJ~*Sj~)) z=ag*?+hPPmUNPH7MG8oOcb37wD(m; zxJl*ld;)y(qQ;ND^4Oc*#M-qd`0lsf!ymlAj_vglow$!Me2Lj`7q!lM6iv7gI8>#f z!oh;#(b)_Da6Xi|r;zJmieHQc3qr5~WDUWcmGH*JB;dC%C)htJ<+E;I?T9B<2MDL| z>FWu8_0@3heNAz4ULhsuTvLdRrx6#ETBd((EEl4h8)HIQMJW;#g`DEMhKJPvV9mkN z^D`c%HY?6hh3V9`^=-rbHwEF>P^Itzl@k`ej)T9;n8~^;@eNQaCb9c zqc?>w4^WR@&+P!^3|pxq zokHeM^^jdSv8yc^G;_b5b{fL?MXeS4rfnQY@HwXLv<$-)Re6qBj&xv01qWf0a$Z5Aw!f3aJJE@~ ziXQLXUB=2FN4J$hkEhHvJ$L58*mMpH#FRMl4ic^&oR)|>qRl(Z0csvyl?gL4#?>JGn9mGTY7*V z$3#5|kL04sagC;M>S&R##(bD51`zl-_(Q22j{G$B-b`-Bel9s#1QAw0Ls*zeQnLub 
zuc6n4JQ%L$vE%Y~*`*uyKwj+g)tkc#Mng@8Wfsdug+)n8gBsdTFj1 z9&0$t$>T=Jf<=Tp&(!;>#ryi%&Jt$rN4WoR4q#8VaBF3ZPW>8bv?G;AlqWhWZ3e_j z2f##tX#s+y2e>1#N(zRS?Uok+Q|}6_N?4+6WLT=+9vUGlE3uOIl7#(>{TZ~FB+UGa zaeybm7YPuR+gn@hc?GkN?fIIblFBjbW-?6Wz3m=uY_y@pC)wX?XT0P@bzldA_&Xqb zI~NweQ!)vA2NSMYH4Fd@*#JB6159LV(%yuSja-*{S>lC(+j^`fXzA4?D^Mt}C7)zN z8-5;-g98Hl>p0$y&Vyc`79-?Zj7B}fWdX$+K{V1XkY>m5vwbXK7q^#>@WIU*5AIxt zTl*IF@Vj_5yN9!KAj4QP&(PYL#6AOgeOtT?VckzLPi`-~uN+^WUBl|!`}+3#ck$ls z0&DpJ^2tK5@Bm;ypT9Mv=TRz;69F$<34!vsJOB?kH8_G$qp{loe8@@1Nk&M5Rbvr2 zv>~Q!keWdq|4ed)Qem8fOoO=VoeleMT#+HdxPPs)$bqTkIOKENMJB)|^4c0=)`1@Q z{5=6;d^4$-``*Uook!LT_3By-`sku&zRJuFv^=S3R8sku(t245S|Oxb1_XdSLoHh& z)$>w*N7Y{=C~BBM0npUc&sU>lGq)W;fR50dkQ^l2w!4AnQeKsMfkCG@TOpxm4tc(hz6mA3yDS+;O2V`&QLQy#jI?{~GN+t+0(Rg37 z-x5z{pStT-?IA`A7_un9bHixp>v=WwB(*`0jo?oh9v8m{7joX9$5Ud6(|FiQ#Pd;< zQQ7U5;A-=Y;-!fa2g+mQ)jn4I9_}o^!MAQDY~Q{HxB6}Dgzw<_Xd4&RKp!mV;7ua8PYb8(r-jKdem ziZ*=Sfy=sLKJNj}i-*XH9dl1+(dyUcT1WaDnTk+jk=ZN;9Bep?X&LeBuSYmKtCjo= zVd-zJwoIh!92Pk)X;=F7)3G#SkBi7EzkILyVQ)?vf~WSCa0Vk8huVLedb zogUfSX&liO5y6BrLn|;R1xxNEP$aejp-~thK!U(U7C&q_8Iw_LXy(nB=yy#i8P&jS0HxwJ3Qg>mK!N2=M}01#K)* zOOVbNGRev>z$@<~YYk-5Rmys(lLuVEDk7_o91a6Jd5V3t&BfA9?7}vkLnAC9*&IM-=@|HV?tOXcx|K++yPS z9l`b;{o-vj11Oy6!vsFZmubFB_FD-b=C@!yAz9OJN@@l`xL3&IcgtEL)gIBC!=nCKuM=?r}f= zK-M8i@nB_;3zg&cRtLE<^5qzNKF&8lVyMJ$SiHMA`Bh|m&c{K)i33Tjw4Rm*_w&D0)lWh?4RwWBiI)an9ih9(rF9Ol zeI?-0tpT>zrdYzhD0IZhp^TbgTzo-`XgI|xvjwWu3<7B;{5r% zJK^bH0?t1{Cmo{|j*whN00s~f0a6zhRao@UZD&T*kw0hWiG~0H4|XUln!HOKpH~8E zpFSUoq*KZD~JKSa6zLv-Q+I@8~xZxGxVikl7u*q%o74MI$kgs(#qtLliyx4P(cNwwz%LHaDeeSaV!=(WYf^yR!E zAH<$cR!whSkz;`b1z#Q)BVdP+yc-AiwwfK3OGdtF;i2cneoQ9*IY5KnPBUNT05IJu z`)KSG&_---XSxkXu;TGRFqK6ZY6h>z^5R0UY=1Z#R(j7H1Oi{9f75~tKiAS$csyW3 zrMeC}4?sP&dJJgp%UR(++k*wW?U}eHa0t@YIQ!~PAsc=S_vZU(#Zw6*6LQ4F&?+TT zBN`3pO)!G@%n@=S;5jqgMXymnkwa;U+{qAO=LFPnZLN)7+j9VkNz6ldVwrc6^sj^D zzF?u9+%PRrVbIM4_=?WtplJT z5+6_pCZYAS3ETwN28$oO4zk`s9sLvqyh!(O)+zB0`^OVWj9agwIiQ18U5^fq(V2cK z3b%RQ0-QJGW(7fg@YP_3@7m$LFl1Q67c-20-d|hYgfrgHtc-@QC->VC5CNX&&3T27 zpO5hM-V{lb_`)0G@0<{3m-{*Nr-Z68nnqi#B$J`Lo8{_!A zgaezMzj<+ne!+#GV^wDj_FB)f*(PPWC-ke3XXxenH0<=U?a1GP?w9-i{9p#}JOH*q zc@78$K>e`2W0Ac^2#|f<&O%VJfb)K_7R|B`@AlDYWr)eic{(j`=6~un=smZE5%0Xd z##?9aJTjPHM6sIc5Ulor-U!1x!}rqHOxRm(ez1zgsw2;%fW+JzJt3RxvO`jspCWLY zN}0q(;It<`fbu=Rcvazi5-^@6yg82mfWz}3p!(vd!t3LOtW2#Qn@)Wai>5CV2GqE% zU_+3%9?imXz<%aOf>#)n(XfCEDUdQ&ho4G@?p;bRL0q*pX( z5bWtAnliV@D&!_0Sayw9+zBm|Aa(Fhv?+pyRd^+caFKnS z6{;a401GywiBv8z=oNUpy(FzTff;cC)`RlQPnji|^XeKro{QDlC*1`ES|gNb!*+0? zw6z>&_s^;*4E+!j#OoZ^QpKE*qBf(~ z0R0%@(coFJ9f+xR!^zUT z7F8`8)5;Bi$a@TFU>X19^*HMJH-rfpj*$wYlnM}}bPhnHX&eyg7nyUocTyws=0Op# zdVC|tqq*I?n&I)y0-MWTys=K<0retw4l8{2s>IniBKHIVeouS9jAu)YGbn{9Z?ZPwN!Kqm)1yo6;a(=Av(TsOY z05_qu8Dhp(<>QVe)`SYcz}<)Nwqq@jLVM&{}W(v6}+2s|dxC59tx!!;0z=DkC!$do>y zB7~VHsl7>9i?J$eO%v$c_F~||1ic!sMn023m=e#Gd7fV@kMR69%?VGQ-u42-9^dN8 zIx#0J-q-3N!@XoeX8bh=iqQ^Vi+BmyosBar7buAlV(z z7!C}Em~|c8-0<+>L)^G=!_5tEyZOuTB~DLITW4oyE8hF1xfNNKVQvmCCw;X65L~NU zZZ*RQ0MxVMVsd7JQSv8UwqX;{*4QdVuv0J2gulEtm~r@kgL;Upq#~Iix@!Xg1=NvE zQ>dZu5 zEBjOd(1T=N&<+{_KraR&L1Yr~7B{H_rrxHRd~U0c4DYof2$k3|15&gCX}l={Tyvq< z2_&LAB8jNXRq1y)8`U@))`lvxwv^8StcOV%v3p!Xlvwm6dRnF4KpMrL?N9X{iR8Dw zCq@sG19}_R3MB9k1FPLtRk?P%jlp2xrqk)d@V1(Nn=k(3k3Y^o{q)miuK+eTEG)}1 zpXa&vbCbtjA9}0VZvctaG;*a3iPkE?sz+rtyvV1D&ECw!hHgH{4q{D$Z8YAT37DJ> zX`JPDNs@ozjTJetKJb7x4{H?@hi3s_yejeaekJ^}LC4|RnwRtR+io6u7Fg;6vpV5? 
zlrWl6DMarFrzCMY6lHQ4KAKRpNkY}P!0Otyjjrf!2|!toqFHG)EO$!c@K}c!Y~`2N zz@KMDn&`!_6`sJO}lTZNWjoSwhD}IH+j~nO*tY7vlxUNw`>3NmfHoU^J7$ z0G_^{A_T~FURlZU(Y-!yUFk6Q)%;$DbH_1t=DTEbgP$+XIbYYcTV7svy+Q|8<&z?#=WZxr3Hs>+y12w z5u!x#*-nYig!Xzyh*WlV!Un^N=$a8S%gAg?Z1Ef%hJc9}XQoXp`&$k`9~8E!?}&Q+B`X z7f-Cy^3smvGmrCeko$ZwNx~!h;?)Gd{%Rz=G7W>}UM_WWp>y=R9xO7@7+N!omS%ET zZ!MLBdSYTlQDk{h zf#_9K_oQF`09M2{9E^WELU-u!0v&Q#d7e%wJ`OUhGk1(%*V}VyQ{YH0J>) znv=7p=UweNbP7ntZ0AiasCV4k~Kn)pJ?uA|RTAkikHiHAo8)FCrLqh<=J&+2~^vq&{@&Q?;P;DypiI@odgT*s7h?GZSONh!w{`$_6EaEdPulEsO+_30oNp zLl2N&{-*~4RSj8=hXH_iStv4cr$`xL`TlGa1kgTvSzV$_DoQoff#hE1 z@3^hG8>C3NZljRVkdB5f7HFWMoOsa|UKj#0oTqla>^GhuLvSj<2Nl**-4AAkFDEbo`K%&J6Lk+TM!5T0iuV~zLk z^szF?n3rq{B&v50FzmFUJTx+?>XiH9>bck(}{T`HG7F8ywksx2cY`<@;dhpnNx;;0UW+-Or`YZmtTPL)|Cu-2K7D}&Vjr*vCIQ^HnTa1 zrlJydK`@J8?;$-@(@RQ#Xrc=Nnu?#9hKrG!U-CX2sRhJX8-_FIW%*d@YB9A3hFl&1 zLr|WRd@Mt7CO3}f;yN{^d)F}uL;wBTJ#^b%a%9)nTZjPq9WS0&k$VBJ%zIp&zjxNH{tVP&@=Va-6c{#3YF?`LkZz;lZ^9rzux3lVhY9aC{!IHW052 zb%^4vO=grhVv)ju_tkUXO_(QITC?#ROwRX2C~#~0+gQE+0X81|9q`K!;g{~C4z-czaS6Ij zQf3|K>~k`=qevlAB_KI7hQB7qBgcHTSL5YTg-MyPzT~mo z1p=QM9GnH@Sz6@FwDk!Be9_2xvo(eU+=Y>Fth)cKsKF6rAYlyhTuH393WrQ7HIDnu zC^s4_hq9(CO4U|9#;R0^@pyd=`5b*eQ!bjfl#ymN6|jJ(=KmM=zB0&hE$Q-H=}I!Y z?BZs6^f50pGcz+Y@9$;i{h0Y1W}bOW&D>qg;BuK!($z{CSCMhU(fg)jdfWWW9r_FZ{iOhY1^n2I`t< z4kWIu74XvCDsC)!g;SN{Z{DujC%-29gKSo3_H6RG`BE4vw)|6)n9=-^KA(H zaa!fn$`od$F@287qUI|BV_wWJ{*jam~$9(dNmt=zZDF}j`tlGXa4A){*4!Z>>vETZ+hu3 z{+6fye zfB?-Ob}$~aV{0NBccJ_N((*o&V@>UT&Hth8)LT#{XtFL^#2$|sHyC7IgI^}C#8^;q zbuXa4NqS4cpv{sqMAabgYcNh_c{q1BCVmqUXX&~9Mu@FikpA)+_>F}_8t@CXd3qVA_Zt|qC9_2_PU~oJ+ZWj+;eE$9@Y;fK6S;zCobo4(4Il1SjOyB2F08M%eF`^ zsnZ?IIcoU(yMwU=5b|jRozit(NGWlAeEhaB>uY_&<>h57HVHsJK39bSj5c|p2TG|> zg&}%FzkX2f^zW|h;`Z_m;%)lE^PWoz3$a^`>Qn`_;d#8e{mpoD>l|wRatdfh^(bOz zNw*^fh++$Z5yD^y6$}vieF8S(nVJ2Y?17kda=l_t%B+}1Fq6j2Pcr#tVnPMCJB-li zDF_Wl$Gr$ky8(_`T5&;tHdiJgj76L?#kG9HM=50m{DAUlxUe8m$qU+F%nCr5P4D~} zxLrxl9pDz33D&s@EVkZf@VXGegniUvcvyOyue%9KCW07E?|cjsU^yfP zz3D_4Yz*_Y0DvEh)CBjQ+BOkx4RE7p!67slz^HHl%f^FUiBDX~#$H$+hwU;>&Q@Z} zVY-w>F5}X5+COUH&dN3(ZtSDk=@&8**(4j2_MW8?j&0jf;v{rBowtQqU#k*xgk07^#+C5H5S#l)xU;&8&AmEZ|EJEKz}YirzSJ5- z_oZzmoU#jR?;KZpY7=UN8|2c2tNZ`z@ zjdE5n-NTgTrXQ0ZQs@b3M|AvR)#A1%?HxHJe@(4UXb%+jn;|{FvhNe)pMQXnF;b4q zJSXSK)MTE@Yz$AwyaDXhFpM)u7{f%ka*7E+n9RY6$!SSoFfk{Jn~8kzE(T+enW3av zxi!wm@yG}xAO6myGDb_{2n0bu5l+0u#>U3m#;mXA zo5%f-76ALnv^(&7XitpRVHhR`TI(+&HWeR1Bw5e3F;mLo^lT9)W=r%YTl)>%THe9R zb`AZ(2%eirqBxi?xmcXeL%55$eUt#&GgvuZKp17{M=kX@e%Ky_jJItU!X_uvY|K^M zMPM@j34~4ku@vMK&)77wb3z|){1lnL1)lpYll=Uh?Ih3CoM~M+P8y36Q4T$FSCjM!|_KoL<`O- zGRT-*5>4AMuBn-ON>9ohLSp}$$4>}KhLM?&=Mb5EZi8nBqtBEb z<5=hnZM^!x!v|l<;Oa^dx!epcoS4E~HHUoGLDqB78;-HIQ^Wn$T^!U~$YnfSI=hH( zdFl*K%~vS}(r&jg==Viz7>$N>GYKJ7nzD(B=t(gydRJF1V<53&v!6f*xT@8RM4K6?ET{TC}0vhi^) z&de9#X6NzX_-TA;?HSxTT!e5d$QP^dvL#5b0LRNi*jZQvY7Qb;2B}tuKN{HBLj&i; z*w}+J$g+_Vh>YYut&P4%vzR~!8qMW}3~+kd0#PUE^N`V;#lxmTy`zZynsHf@#`kGx zNh-KV{g*jc(7DPvLE)$Zdz-=xzYHBV_83hCRuhOgGjC3cH`|<8W1qo-vEOX0*Ip0v z_-tdMr{UoluNK@04rcZn!isdAEP^p8cA+Nahrqxk1Ox0NJW@l21t8^E`e%I9w&T#z z#V@_+;_g-n*-Sb1z{;o;J>;_sD-!!l+nRh*j5W3G}z zA?woTI&O9GU}F!f+cmVi15B3-`1a>6;ya$Zgi{N%)G-ihf;aQ%EaWp@6dU@30eNT# z2L}l$J(0+Y-j#Y}@kJ1^Ju-p+*7OQkuzK|2p3P@jJM>9Km-_ zB41vFmzyFFO<0ok?7?h$Blp4$#q3RRSY+hN^kOEQnUcXhjl@+jTcKPBB$Ai{(N-aS@q3 zxyf@q$}1&qnGkjiS~foVx{Hs!p2Kl(8fO=0aAC1b706OP0{}RvxA9 z7{D&zTVpS5akfJLv{Uf1=za{zqDWyp8sXsRSj2`_t0`htnS|4sF#4m|@OCrnD|`bH zoAvlf{Ko>W=q%09P0Z|t zgh8la4@vXKc(j>d86H4r@aEyKoS$P37-1ZMxw1uhuzFj;kr8=kmrmH!o9Rl>dGfA+ zZSz3h!6>2}+U{`(Kjbze&lSmg@ehAeq16O525nZR;4%Jekw`Vs1YsV} 
z;j2+GG#BIC29;t`>)*WfxH;UMQV#QQ4k^v2Ci7#0t|`LE#+^-x54`H)Q*UO`8&|P7 zQ^Kja0_DFlo}}-5@2G|QF@WvXni!3JbUH0+@oUtNFkdZGM8S>Z9P0)_K=*v|8wf*{ z97on}cfy!iAl{7wdc{{jp?CtG*n|Ll#Dbg@&=8IVfsZf@AcO_aa{++9I29=k5CjIW zEgJwRoIE*O!inh|X38GMzEAmvn-8|JbI>5~BA<0pF6OXM%_DD(aQo^F{L=ehz!zRw z$5F>aCWLm%kfFM+cRE$!S!&S<;gkq8ElSL@z z=vV|HttpgToD!HW3Mc|=AnyU8VhQlxC?U%#Eg!=LgsDO*xonhGCt9NPXXBg|v(zU& zB4PA9PEaoRe3PT<4C$VTiDP2YQ=Aw{XNW=?ofDHF6-JLx%HU<9r_-#Y8eu85DUFql z%gZ)?{&|TP?&P7wG%lT<#>v?Ns>Lit556B_``|bZArEla=)xZdi6=z>wz;{5;b;iQ zkwl0!hOkg?ZmM|V!1FwKnGCYoET&3%6&oTI(p7)A0ojxAzM?M!tfI#RHwD1fmhl5v zgK-FH+oT^Bdye!a3L$8pumobD(?6_G^RPlXZ6WK$jk`1Wo@Y+r+`<$A&-&gmZZ0L% zjXFl-5ZR1Ng@Ku=JaS@)8?Sr;FTb&8u|OZ7PfkxSGD&d(uDobw=7rYF!E0%`JI z=QPrVwdPIjEf!{E32)|hWnF<26@jTdD51Z85K^eJedq@b|)AruUw%^CAb-sjPQ`nOoTJvEHWqkLRe#N8Wf`Pvjh@Fu~9uY9pML&Vf7espF;3NZy(Xi2zFZ zu}d+KeRm9K&z!Epl@<<;ySTHmOT~dsZ%8*^G3O>BDTSuR;yl(n33cOH+}b;Zo}VKL z7isJN=&U!!Q?K=YP?*KE3IU-(0Y<0ov-I!7?~) zte+Mt;MBB@vq=ukHirs(4sdG5f=B@tlU3R<1#Pert=Xr@Tr$NvVn)6XfI(vc0QShh zp*KtkAl6C)0Zpi202mMAnB<}{ftQWvoKBA813=!7*2kx9a4Qk*^IV$%XS*)&*=rs? z@>&j?M^(&E&n5sy{{XYDL&qnewX}JN<*h^FgGDO9pwqzN{tgZg4=K-;%Vyw62|*sO zMb|(&4xL*mg)mGkj4dFGB9&GuNu6c)poS~AAL6LhEzed6&#&BxNkvnXbrE)Zb~6(p~UbJJ&lx@o~&2` zVuHnQ&{4UE6TAiX#w^-zAgEHx^si`{@Bn$3FoPt_7BjP$z$4nCG=Wdv{@Q_s558)} zRmd#18)a0d$~Zk=K&6yHx!{4Mua8^geQg~y(bs~V{q`}A4|dS+G~ii^c#t&g7ohub zFc=~&j0D;n)9yTh7ZJie519;|14qYA++E(pz2z;mx_vm3h}>iTF5X$8rT97`voLG$ zkL~rP7?5rEM-M-LdF8Ku@#@;&h?~aWkhX|Im9*LPv2obJach9IaeI`w-9ezijDL7k z6i7^una^Z0WC>7Jb2es64yKD)>IxW+$5`DuBaOCUrm&_fn{G-ZX9Zl4kcJATsi`$`Rrx{yWe{N??Sbp9J% zMIKrhApt3(ZcLX1Hje`I$4XxpU@C9LU~cQFOD3rDa2Eh9TnQNSK~3rbnFb?6^2SiF zU~n+bWeOrPt%7oj6O}^^Xh^4io2nGh)Qrj3(!xYo0mYwzJ75{aiMOR-Og+ zsl12jQjUsFp$h4GJCbNWlq2&pt_H6RTvs=WAJ%dG?lSJKY@yli6Qw9V|9I=L{gWH} zja#t+fWq5&vA*5__5?h-5P$-I5ktK%3!t-8Yk&N6mskGMi#Im@L8CLc?n#?m@txxy zHV)e841JK-75GTiW@%d{=ZL(t?ZXDbP!aHzaxUhkJS@&kp&OmS<=q6(E@G!U4G0^aqd|$;Pe3{;iDhlL zN+@_-8Vf`6nq_l%U)BT8%vm@)OG9qcVIoAySl$b;w5PDL8&H)p**{kj$hknInCuxU z#6U(P<`4wjUX{+GUqglOAH6Cove?&|zByBEo}nZfOP5k8E69wej;WIQZn7E`q3vvx^m6I9WzH zp8+*-2T298K^(A{N$RiUB!G7e z{bHhO@r@5^6Z8vT|4o5T`nv$~Q3O8@Gr#cZHR+O4du8DGi#!L8Hmq!-zbx-Y{tn z=ZQ=}@>-&+(DQnbu)*7656x(l=WxudP;#_qsJ=2l^IY@EM)Q=(Q62FIzub2Kw9|4=Uu#bI|JL9#-&qL5QU8mmOCA_p$L=j}AN}x;fAZS*{>Ptr@z32{-n(-7{>em@4HEYj2}KZVuo|8^f%`z|Y}sy@LCP$2eQ5 zAptj?6Gudz zwPFMOko3t&eU_d_3P6gVUdZ92-Fxb+13%{l!;rZ~fofs|Q!~JA3-lh7*p=I|#@gDJ^ZRpM0X9-~X|4 zc042pT)C7Qzx|vu9`jSSjya==8_v^hcN}=aKbX z0=8yn08rTACfi##mN!0fW99Jq{&3s?;0YnJy0bum3RuF45Mk?}bvF)c@5T9$i_e^$ z`7Lp8=dX-!Pz znSG}8gAf3a(ilRn#0l^qVhXvyUP>6z&|W%fFczpaLBQ7Rrnw@0w>u!OW3~hYp&+U2 zjAPOJ={`?`k7dOMqw2>Zivp8vL=e26Jv*~v$;uNZQgWDT^2G#bjgdVIOO4^4_{|XG zLqvlI5SB~FgGCDIT3E(7Id1*prgB!iP3495n*w(?1vU>IsK~?V`4XiWGOj~aN)>^; zp?b4NK$aG4f^p)3b*Rh4(tV*}NS+jIOB%wPLYVq{)S<%4!us=B$Q6pn=8LdwiCVpZ zd^UsO*l(>Kc787YXZyb&Zr2~2KQ$wByNzOe-v%1MQZYqI+wU}w?6HCE>+h*qV6d|p z#ON{fzMS#-bq*~ERqx?e{b$$q8lSmza{711fb%DdIqzhx)u+~vI3ysjsJ8~IS03zq z=+0X0h4?;b0mumb`zxjF$DcVp{hQ7#lz(lr(<`;Q z1EO`zPZtT;+^hrFwUcla&921$popEdF3xlVq&cNuaRCUR5a_%aH)m5$3pOHCl+cO{ zJ$HjuuKOT8871t08VMIg39x}e$lV6DF`+vIf#j)Ki$bbE4;VJ*d7A378bEpY%A`l8 zLtO@^Xu|JiIcyc1ZFD?5*1Yr)mmbap3q zWzaVDs1AIImAyntI>gGh2hYi3zM4TUD=B&JQvOvDc*VWCBq>ces6zBRE%?I$f?#Z@ z{-*rFKm~AI&i~OU>!j zzyLr1_TVf~7l0bv+o=8gr(Rq7v++&vKYc$uNVq!9?&yBJ@8AB!C$IgTIF$To3}Ryd z1<>z`7jqs+r1U;&6R<2{3jz1I-QSJf`yc<*>ns2A-bVeG1|z@T>Gg4cZJ(4k4vyOZ zfUK7caLr(0I*V$#g2rG5wQdE+y%KKkRL~DgkPZ#vg-PsW4+eU=QrZH=Y;_*As{o`^ zf+~5$>%>a~=BEVFH0H||TrIkoTGjx1Xt-iOqm*m0p@ZA7|b_30$Jq)@{l1P`*PUldjgQj(B 
zk(Clz$@D_cb3dto?KmiwD_A&n4yDQr9O=MwEQCRL_{zvy27LV+$fK9MHQP%(C<Ke@iw{M>vs|8yK4-W&O0Pv_h6kIErx;1cm-?$Tfx?~g1h znZa&#hxN<%c78Fg6n*YnFV6qAZ@n=02iv_NI-5fja&62^6{tBqpK<6ve4rD%L8!1% zuVC5tFkd*v>Cz!)@_p9(((OAz$P9eTX7NM8`F5o3Oko1_0AZ8 zQvfWO#4CqeBvPqrW1fqspR zFK>xVKr{9$y`Sb~KnSll7&X>*8?XG9?>_ejv!wi)Oz|F}yF142Q42Gr91_O0=Q*jW zEH!nH?HmpVS?si`sATFmSE*sKlscLUB87*DLCoTu4=g~3q2P%@W}a8K4RM&t`tm7o ziB$SWC;N2*S~tL(;cKc$YfAFa^P0lUPjD^|VD4Cm$-c`pqz;9P^4VVlSM(4L>rg=t zAgLOmktsu7oBqrbLXHADPvrxy3j&yC&%)g;iQDTIe3cB#&0wZ1DRk2$QjB~bjrIV| zb~g#bj)kz9ho(1D<33-JRwoJ2(rq9j8c>}7imgO6AQgsi z9XrY8?#5NeAO6zSwI7Ys7h5`gQq=c#Pv85FzV{_K0to=_vSND=$leOn)Fad~#1o`q znCIdorUEB2`^M*Bugrq!!HqemM%$s@cInth_oNBa8zbQ2|F8^bmVTV9ocIChqLqsZ zgHe!#v(%chU2Eg8*+aFI$J}(0DyfdN;kgRgP{H@}@o^bk*_}qE(Zq$yAx=#7Auxhs zbTD`e1aL6DC}C6rSq57YXt9T7NH<4HbH~6#K0DBCS>j|l8m%5$ojw%3*23a^LUx_ObfrY(RQj6J*`^wSX)+QlI=_}?r;suX znM22UM+L`qk;xUw1CvsFU>49G2HP*+T>s%OTwVLQTBEy`dSCz|4Pq8}9r6A9=nCb# zHAnXNpvHjWYYPqU?a6p+5t~t3R%HBmtelk=xlY{z%bPWH`$Pdj)^$mrYP#%UVY&og zoW#}rXYu^!X?KfZTJbHSbvg z1JI++QWC4BJ;ljs;rtk!7ze=od?wBkzC>d{`9Mf(2o=DB(w`5;`y-?<)ugfATdGv`E}+k zLLL}kW3wi3W6i-<%|$+&r}2O;@#Z9XTA>O_HMrL3pxYY)l>C=CwK$8D^V29~GijR= z6KW$?QlxGWv-#b~f${tX3vMP$z~y)uKu`seNUn{}aP^J5o8KR|+Py!f|7-wob;K~# z_aB)2P3s}2Sje7uALV;MHo2%rfSN`2m5^j73lThah)qJLU|?2Z9hk><^^MZe05&o; z(CB@v#|pXm7fc#cky4~!K;?ER0Z^|dvA4&eLR=(3cNn1F>{6q8G3OB%EzxK9q=Y1r zCXjIi#z8VPrHYMa3#UqlI8$lDv&S@S1ki+5Lkfrs228RA9}u)%{j?r#0SXWv-*>6qVmq>D`jaGp1!HAf$kjJsN_ububx z9+1((F}%TVk=pDpQ?JYfHuEu8Ehk%>z(7it?(-R_G|;hEmepW3*3W14>oSiq{w`9H zVy3p>a=T8>Q(v9!*d$HuNgGpsNMP3PkJ0VL)<44jQ5&BfL|fHUO=vd&oZ(8ypjm~mSSSPjEep5+E>daXXrY*yrOH4KwAU{9*Dl1-{$ z7z!8kTFc@&kme#-A}H-LfDK`jlN_%-z;+=d^~oX8kzN#6$MObY_Ml0z-(uqZyGwr20xbvS3Nym(`$s@wjSeiQZiR78EBof5tZ15 z1w)eCb0p<#iUd+VS_zm9#%OhGG=Ozsb}9z7BK6jVic}a-%*pr|7b}e-w%R=`M6`H9OzIyKg**7Yv@t5fu z-Wq?Aj}v+-eqJ}5vjJxHQ-BkwL!=%YgItFJH}%@+Gdlm@&qx!UeURrRC4>|tT}(GV z0s?>({BmJVFJkaK`0$8V8oh!pv3+n5&j(27%I5!n|i=9C+C57u`#RUZhRZ|&AUl*5l2uHz!qK1ASMJ(+oT)lY(d+YnidkeT&O+rXXo{uFcfsUhQ zkAyM(AfWGIH0q<#XwrBzCZ%!_0-y$NzC)lDh5<-H)kUO?e9qy4a$n`j40&Rarm}ch z58DULt1(gff4_Qr5^tl0^8#cHY77{b31W|> zXU3XloUc=03t_f6WjMb_p8>SZodhMY=jTmc>r@vD1PbYp6vuIhs2TJ!hnka( z`23vT(_y4-*>J23M!vx9jXrMOxsRj0UF2kd(=$2ZU^zDBwMbO3-s;f|$h3$s9u0AL zSfhU=p;n-9Byq>+ABU8ZJT5^wM()GZvAAw)qW7nNP62&?PTB`aK$gl5Nyo}|S1Q~mQoQL4&UA^w>b6K>_mCT`D zM|!^vUW^K@A1cx{o1MyIx}2k%KcywuuB-d(ETkG_k(M!zEJaCX&&!Y}2cd8(8Co%2 zM7=%4-f@={JL3EqC9UK98HJqXVir=504wJe`a_B4xQJIaXEB|v;nH*+bEP3%CxD`E z0^)|H-WaWUB%0A^3NR??l$lw209DwtK7+x%Tsm(|9TfqNh&HfUnqli~QB!b`Ed% zCH@=OQv(F6lh7E z)(^3D*h z4=kmO@B|^l6^@J?k(u8jI;wFbH*dr2GB92TT8NJ(Y}EP=HXn9y_vRfm_STV=Aui14 zK^~a2D6bX&?Zn56JsL<&tr!pcI6khUR&PQ|o6K>j0ts&UqFD}fomCi8_{pD()mfD# zU_};=;&W_l@4?BIQ7t+2&qkAfqZp*V^tm^e{@2ar{VyhMZ!rnl&bZP|IEAs=N+%)O zo?EO~#at$ewZa?~pCF(0qHlj{F*tXk5+&ffymf@{e)i;K=>zXAGEtg*2QI-M`Ay#z z0O)62N5(ghbM?Wt7lT?Ff`S3+EkaHPvyis=c}X@&Ebi)aI2yp(FuMWz8qEACipGv@ z5pyBYJL~i#3bMvK+Q~{E0*aFJk6KhpvC!Ls zhkUTmB8QZm>|wSoC`oP+wi^WmIs|5JErN}$n!vRc7hClKT=!!rx)Dy#&j|B!Qvry1EDx zqJG@O&Owc+F}5usT_Vu}fQtKezwrFE)&IxdH2~SMb=`g1x39-DNk(y=ZNJ~P?ZR){ zwr!iAo~_uY2T(PW4Tv2f?hnvG!WO^U{&&XEEzqE4YsH zW%umTFvF4_s{ZNoiy^+@8n<6F(i!fnz{#n3eR8%YzpwD~AGkL7$n(djb(nXp=;7P8 z!o{UEcy(l9dM%YN&9=N>n*aPlV%gHZGZqqm=%J4;9_tK2>|FM z%gK722`I%-3{psNa}EI<-lOH_b2IZKvy5+KRCeYai`JhUD*(^cF_W|4m6-~hz-P8} zFTh}N4L`3(D>ZSY6>%O(QtA;9F-{tSEYl!MslY13TaB5EyxJ6(o+A|sgcvw`Nx<%t z8afh5`S1etRZJ-4@VG=ymAdz&zFJ#tsA(eM0C>%HTH+1@k3OF&Es5Gfo(o<;CFgp* zNyC+*T~O}y9%W^5Ml4iw-=u$o`GrO7M=lUiS!g=lH+3PWlflGn?W2#peEN?t1A87p zu!(=qCMuv*#y51YBL^@T>8}{C9GkXKDL8P3x})7kr_JZ~pM$Ub)SEiDTr~{;^vHfV 
zbb1!9-7=VEV6P#>&wbmc!G|%1QLhI1I@XhW&)8EJYc{6)<`wvAbU!`SgK`FLr$-6n+$ z{e)U?HH%?=fbQNw6?vWJ*NVaw)i z;59CSp(XyI@|u)$l+7tL(=_EhYMUg%Ju$Dt?h_%*)-0%&iqfTuGB4MTd0>rJ8~b0B zbtc)K?{{c-@jR_ni47R8e`x55h!k0s%`yFf$O$d;~J>wyTCc$1+39 z(lvbZif}07>zbe@zT91t)R5N>U|AS)0`g%D{BuWc&T4=9T|3)1Zr>=dr)EC0|EzOj za?WOZm^{*BZY@=M(oZ$p4DcBUdZ{TV-(<*RX4pVkpMRBV7??SJ+K%Dg&u|^1NB)p3 zt+xxd_v5kv$sc}It~@4~UgE;zHaoNNmeod6Ky z_;vu$zO=@5$+$I}Isz^eN?uxdL9RD7)N6|~u;sdqS!y1AAT5##w@rh-9;%4Y1afJ4 zjs8x^Yr$(n9kZ`yV+}$z{gpwqiCA%hK%Wy;3FH7s%d1#I zF2W*S!wBO2SeH0^?NjF$|NG-Fo&1aAlMBz`=WGCg#oo9)Y=VhW!SQ#F_6Ivgd&7T! z^04-K_g(E{CLHut3$T5p_f2G8S%`oA#jj4?%Hqh`OXd@MPCJp-iy2JJ>Y5iaDrmP&~~;i@HB8QXnKR48a@6mL@8bLXw}XSz7Q z6cc$7z-;QFMNRB!{X3SBe{y1K{s3lRg7r_^e7E@eZ5y;(c5U(wp1J_rhI@S^GHd-^ zg~)ZRw}1ndh4}6-zYi`SS(LGUdu;b98~YVG+<$W}v*6ATffpy!ras1EU?!C8Kv^T{L-64Txyrk|DShPK#fV0sK zFoSD?2v^B4MFLti?~_D0fT2oFow#W_0ogX#zI_|`^^2g1q_m_=$IvyRQLHos?mz+@ zo6-?LY;p)H7c9yu9gFyN#Ql<#l6oViIS2r9A8M!uz7UX*QrW&H-|3*Qt?nn8EQ